2024-12-13 17:17:08,026 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@55de24cc
2024-12-13 17:17:08,037 main DEBUG Took 0.009045 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-12-13 17:17:08,037 main DEBUG PluginManager 'Core' found 129 plugins
2024-12-13 17:17:08,037 main DEBUG PluginManager 'Level' found 0 plugins
2024-12-13 17:17:08,038 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-12-13 17:17:08,039 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-13 17:17:08,045 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-12-13 17:17:08,055 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-13 17:17:08,056 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-13 17:17:08,057 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-13 17:17:08,057 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-13 17:17:08,058 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-13 17:17:08,058 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-13 17:17:08,059 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-13 17:17:08,059 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-13 17:17:08,059 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-13 17:17:08,060 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-13 17:17:08,060 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-13 17:17:08,061 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-13 17:17:08,061 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-13 17:17:08,062 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-13 17:17:08,062 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-13 17:17:08,062 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-13 17:17:08,063 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-13 17:17:08,063 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-13 17:17:08,063 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-13 17:17:08,064 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-13 17:17:08,064 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-13 17:17:08,064 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-13 17:17:08,065 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-13 17:17:08,065 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-13 17:17:08,065 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-13 17:17:08,065 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-12-13 17:17:08,067 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-13 17:17:08,068 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-12-13 17:17:08,069 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-12-13 17:17:08,070 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-12-13 17:17:08,071 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-12-13 17:17:08,071 main DEBUG PluginManager 'Converter' found 47 plugins
2024-12-13 17:17:08,080 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-12-13 17:17:08,082 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-12-13 17:17:08,084 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-12-13 17:17:08,084 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-12-13 17:17:08,084 main DEBUG createAppenders(={Console})
2024-12-13 17:17:08,085 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@55de24cc initialized
2024-12-13 17:17:08,085 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@55de24cc
2024-12-13 17:17:08,086 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@55de24cc OK.
2024-12-13 17:17:08,086 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-12-13 17:17:08,086 main DEBUG OutputStream closed
2024-12-13 17:17:08,087 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-12-13 17:17:08,087 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-12-13 17:17:08,087 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@53ce1329 OK
2024-12-13 17:17:08,160 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-12-13 17:17:08,163 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-12-13 17:17:08,165 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-12-13 17:17:08,166 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-12-13 17:17:08,167 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-12-13 17:17:08,167 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-12-13 17:17:08,168 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-12-13 17:17:08,168 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-12-13 17:17:08,169 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-12-13 17:17:08,169 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-12-13 17:17:08,170 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-12-13 17:17:08,170 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-12-13 17:17:08,171 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-12-13 17:17:08,171 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-12-13 17:17:08,171 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-12-13 17:17:08,172 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-12-13 17:17:08,172 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-12-13 17:17:08,173 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-12-13 17:17:08,176 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-13 17:17:08,177 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null
2024-12-13 17:17:08,177 main DEBUG Shutdown hook enabled. Registering a new one.
2024-12-13 17:17:08,178 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK.
2024-12-13T17:17:08,468 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075
2024-12-13 17:17:08,471 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-12-13 17:17:08,472 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-13T17:17:08,481 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins
2024-12-13T17:17:08,510 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=286, MaxFileDescriptor=1048576, SystemLoadAverage=148, ProcessCount=11, AvailableMemoryMB=5577
2024-12-13T17:17:08,512 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-13T17:17:08,515 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/cluster_9426d811-9f1a-7451-bc03-957b286c8a52, deleteOnExit=true
2024-12-13T17:17:08,515 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS
2024-12-13T17:17:08,516 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/test.cache.data in system properties and HBase conf
2024-12-13T17:17:08,516 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/hadoop.tmp.dir in system properties and HBase conf
2024-12-13T17:17:08,517 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/hadoop.log.dir in system properties and HBase conf
2024-12-13T17:17:08,517 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-13T17:17:08,518 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-13T17:17:08,518 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF
2024-12-13T17:17:08,619 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-12-13T17:17:08,760 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-13T17:17:08,765 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-13T17:17:08,766 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-13T17:17:08,767 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-13T17:17:08,768 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-13T17:17:08,768 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-13T17:17:08,769 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-13T17:17:08,770 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-13T17:17:08,771 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-13T17:17:08,771 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-13T17:17:08,772 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/nfs.dump.dir in system properties and HBase conf
2024-12-13T17:17:08,773 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/java.io.tmpdir in system properties and HBase conf
2024-12-13T17:17:08,773 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-13T17:17:08,774 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-13T17:17:08,774 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-13T17:17:09,320 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-13T17:17:10,199 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-12-13T17:17:10,262 INFO [Time-limited test {}] log.Log(170): Logging initialized @2989ms to org.eclipse.jetty.util.log.Slf4jLog
2024-12-13T17:17:10,341 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-13T17:17:10,420 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-13T17:17:10,453 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-13T17:17:10,453 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-13T17:17:10,455 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-13T17:17:10,472 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-13T17:17:10,476 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@22984d68{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/hadoop.log.dir/,AVAILABLE}
2024-12-13T17:17:10,477 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75fa8fc8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-13T17:17:10,662 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@57f73325{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/java.io.tmpdir/jetty-localhost-34415-hadoop-hdfs-3_4_1-tests_jar-_-any-11317881687095712242/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-13T17:17:10,668 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@217e2a7b{HTTP/1.1, (http/1.1)}{localhost:34415}
2024-12-13T17:17:10,668 INFO [Time-limited test {}] server.Server(415): Started @3396ms
2024-12-13T17:17:10,690 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-13T17:17:11,140 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-13T17:17:11,148 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-13T17:17:11,149 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-13T17:17:11,150 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-13T17:17:11,150 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-13T17:17:11,152 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47fbae2c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/hadoop.log.dir/,AVAILABLE}
2024-12-13T17:17:11,153 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@76bce909{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-13T17:17:11,262 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@14caea3b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/java.io.tmpdir/jetty-localhost-44867-hadoop-hdfs-3_4_1-tests_jar-_-any-6483327513676366925/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-13T17:17:11,263 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@32964f26{HTTP/1.1, (http/1.1)}{localhost:44867}
2024-12-13T17:17:11,263 INFO [Time-limited test {}] server.Server(415): Started @3990ms
2024-12-13T17:17:11,312 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-13T17:17:11,433 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-13T17:17:11,440 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-13T17:17:11,442 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-13T17:17:11,442 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-13T17:17:11,442 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-13T17:17:11,443 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56aaad8c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/hadoop.log.dir/,AVAILABLE}
2024-12-13T17:17:11,444 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fa9ffed{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-13T17:17:11,567 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7c2b96d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/java.io.tmpdir/jetty-localhost-38815-hadoop-hdfs-3_4_1-tests_jar-_-any-1262341480924175818/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-13T17:17:11,568 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6f8e335e{HTTP/1.1, (http/1.1)}{localhost:38815}
2024-12-13T17:17:11,568 INFO [Time-limited test {}] server.Server(415): Started @4295ms
2024-12-13T17:17:11,571 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-13T17:17:12,638 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/cluster_9426d811-9f1a-7451-bc03-957b286c8a52/dfs/data/data1/current/BP-1491248910-172.17.0.2-1734110229401/current, will proceed with Du for space computation calculation,
2024-12-13T17:17:12,638 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/cluster_9426d811-9f1a-7451-bc03-957b286c8a52/dfs/data/data2/current/BP-1491248910-172.17.0.2-1734110229401/current, will proceed with Du for space computation calculation,
2024-12-13T17:17:12,638 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/cluster_9426d811-9f1a-7451-bc03-957b286c8a52/dfs/data/data4/current/BP-1491248910-172.17.0.2-1734110229401/current, will proceed with Du for space computation calculation,
2024-12-13T17:17:12,638 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/cluster_9426d811-9f1a-7451-bc03-957b286c8a52/dfs/data/data3/current/BP-1491248910-172.17.0.2-1734110229401/current, will proceed with Du for space computation calculation,
2024-12-13T17:17:12,687 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-13T17:17:12,687 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-13T17:17:12,736 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8ef7f832b14619d0 with lease ID 0x25bc17d5b210f03c: Processing first storage report for DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35 from datanode DatanodeRegistration(127.0.0.1:46779, datanodeUuid=83a6348b-0e3a-4fac-b80b-9445d1fd54a6, infoPort=41043, infoSecurePort=0, ipcPort=45379, storageInfo=lv=-57;cid=testClusterID;nsid=740292520;c=1734110229401)
2024-12-13T17:17:12,738 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8ef7f832b14619d0 with lease ID 0x25bc17d5b210f03c: from storage DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35 node DatanodeRegistration(127.0.0.1:46779, datanodeUuid=83a6348b-0e3a-4fac-b80b-9445d1fd54a6, infoPort=41043, infoSecurePort=0, ipcPort=45379, storageInfo=lv=-57;cid=testClusterID;nsid=740292520;c=1734110229401), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-13T17:17:12,739 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6ef172e612f94994 with lease ID 0x25bc17d5b210f03b: Processing first storage report for DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894 from datanode DatanodeRegistration(127.0.0.1:44417, datanodeUuid=d5bb1e9b-6d6e-440b-a7df-6cc2fd82f76f, infoPort=33981, infoSecurePort=0, ipcPort=40289, storageInfo=lv=-57;cid=testClusterID;nsid=740292520;c=1734110229401)
2024-12-13T17:17:12,739 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6ef172e612f94994 with lease ID 0x25bc17d5b210f03b: from storage DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894 node DatanodeRegistration(127.0.0.1:44417, datanodeUuid=d5bb1e9b-6d6e-440b-a7df-6cc2fd82f76f, infoPort=33981, infoSecurePort=0, ipcPort=40289, storageInfo=lv=-57;cid=testClusterID;nsid=740292520;c=1734110229401), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-13T17:17:12,739 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8ef7f832b14619d0 with lease ID 0x25bc17d5b210f03c: Processing first storage report for DS-85b9e253-6843-44d0-9c78-99113ec591ea from datanode DatanodeRegistration(127.0.0.1:46779, datanodeUuid=83a6348b-0e3a-4fac-b80b-9445d1fd54a6, infoPort=41043, infoSecurePort=0, ipcPort=45379, storageInfo=lv=-57;cid=testClusterID;nsid=740292520;c=1734110229401)
2024-12-13T17:17:12,740 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8ef7f832b14619d0 with lease ID 0x25bc17d5b210f03c: from storage DS-85b9e253-6843-44d0-9c78-99113ec591ea node DatanodeRegistration(127.0.0.1:46779, datanodeUuid=83a6348b-0e3a-4fac-b80b-9445d1fd54a6, infoPort=41043, infoSecurePort=0, ipcPort=45379, storageInfo=lv=-57;cid=testClusterID;nsid=740292520;c=1734110229401), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-13T17:17:12,740 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6ef172e612f94994 with lease ID 0x25bc17d5b210f03b: Processing first storage report for DS-fa14e8d7-60c8-4053-ae41-a2a8385faba9 from datanode DatanodeRegistration(127.0.0.1:44417, datanodeUuid=d5bb1e9b-6d6e-440b-a7df-6cc2fd82f76f, infoPort=33981, infoSecurePort=0, ipcPort=40289, storageInfo=lv=-57;cid=testClusterID;nsid=740292520;c=1734110229401)
2024-12-13T17:17:12,740 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6ef172e612f94994 with lease ID 0x25bc17d5b210f03b: from storage DS-fa14e8d7-60c8-4053-ae41-a2a8385faba9 node DatanodeRegistration(127.0.0.1:44417, datanodeUuid=d5bb1e9b-6d6e-440b-a7df-6cc2fd82f76f, infoPort=33981, infoSecurePort=0, ipcPort=40289, storageInfo=lv=-57;cid=testClusterID;nsid=740292520;c=1734110229401), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-13T17:17:12,771 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075
2024-12-13T17:17:12,837 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/cluster_9426d811-9f1a-7451-bc03-957b286c8a52/zookeeper_0, clientPort=62632, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/cluster_9426d811-9f1a-7451-bc03-957b286c8a52/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/cluster_9426d811-9f1a-7451-bc03-957b286c8a52/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-13T17:17:12,849 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=62632
2024-12-13T17:17:12,859 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-13T17:17:12,863 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-13T17:17:13,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741825_1001 (size=7)
2024-12-13T17:17:13,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741825_1001 (size=7)
2024-12-13T17:17:13,489 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e with version=8
2024-12-13T17:17:13,489 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/hbase-staging
2024-12-13T17:17:13,616 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-13T17:17:13,842 INFO [Time-limited test {}] client.ConnectionUtils(129): master/bcc0a2319104:0 server-side Connection retries=45
2024-12-13T17:17:13,859 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-13T17:17:13,860 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-13T17:17:13,860 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-13T17:17:13,861 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-13T17:17:13,861 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-13T17:17:13,974 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-13T17:17:14,022 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-12-13T17:17:14,031 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-12-13T17:17:14,035 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-13T17:17:14,059 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 8654 (auto-detected)
2024-12-13T17:17:14,060 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-12-13T17:17:14,078 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:40387
2024-12-13T17:17:14,086 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-13T17:17:14,088 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-13T17:17:14,100 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:40387 connecting to ZooKeeper ensemble=127.0.0.1:62632
2024-12-13T17:17:14,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:403870x0, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-13T17:17:14,166 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40387-0x10020655d730000 connected
2024-12-13T17:17:14,242 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-13T17:17:14,245 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-13T17:17:14,248 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-13T17:17:14,253 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40387
2024-12-13T17:17:14,253 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40387
2024-12-13T17:17:14,254 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40387
2024-12-13T17:17:14,254 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40387
2024-12-13T17:17:14,254 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40387
2024-12-13T17:17:14,263 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e, hbase.cluster.distributed=false
2024-12-13T17:17:14,342 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/bcc0a2319104:0 server-side Connection retries=45
2024-12-13T17:17:14,342 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-13T17:17:14,343 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-13T17:17:14,343 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-13T17:17:14,343 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-13T17:17:14,343 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-13T17:17:14,346 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-13T17:17:14,349 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-13T17:17:14,352 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:35469
2024-12-13T17:17:14,355 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-13T17:17:14,363 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-13T17:17:14,365 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-13T17:17:14,369 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-13T17:17:14,373 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:35469 connecting to ZooKeeper ensemble=127.0.0.1:62632
2024-12-13T17:17:14,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:354690x0, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-13T17:17:14,382 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:354690x0, quorum=127.0.0.1:62632, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-13T17:17:14,384 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35469-0x10020655d730001 connected
2024-12-13T17:17:14,388 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35469-0x10020655d730001, quorum=127.0.0.1:62632, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-13T17:17:14,389 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35469-0x10020655d730001, quorum=127.0.0.1:62632, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-13T17:17:14,394 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35469
2024-12-13T17:17:14,395 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35469
2024-12-13T17:17:14,399 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35469
2024-12-13T17:17:14,403 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35469
2024-12-13T17:17:14,404 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35469
2024-12-13T17:17:14,408 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/bcc0a2319104,40387,1734110233611
2024-12-13T17:17:14,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35469-0x10020655d730001, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-13T17:17:14,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-13T17:17:14,425 DEBUG [M:0;bcc0a2319104:40387 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;bcc0a2319104:40387
2024-12-13T17:17:14,425 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/bcc0a2319104,40387,1734110233611
2024-12-13T17:17:14,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-13T17:17:14,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35469-0x10020655d730001, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-13T17:17:14,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35469-0x10020655d730001, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:17:14,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:17:14,463 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-13T17:17:14,464 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/bcc0a2319104,40387,1734110233611 from backup master directory
2024-12-13T17:17:14,464 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-13T17:17:14,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/bcc0a2319104,40387,1734110233611
2024-12-13T17:17:14,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35469-0x10020655d730001, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-13T17:17:14,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-13T17:17:14,474 WARN [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-13T17:17:14,474 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=bcc0a2319104,40387,1734110233611
2024-12-13T17:17:14,476 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-12-13T17:17:14,477 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-12-13T17:17:14,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741826_1002 (size=42)
2024-12-13T17:17:14,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741826_1002 (size=42)
2024-12-13T17:17:14,544 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/hbase.id with ID: 21ebfb4e-ef65-459e-abac-8424e0e46266
2024-12-13T17:17:14,586 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-13T17:17:14,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35469-0x10020655d730001, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:17:14,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:17:14,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741827_1003 (size=196)
2024-12-13T17:17:14,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741827_1003 (size=196)
2024-12-13T17:17:14,648 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-13T17:17:14,650 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-13T17:17:14,656 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-13T17:17:14,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741828_1004 (size=1189)
2024-12-13T17:17:14,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741828_1004 (size=1189)
2024-12-13T17:17:14,698 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/MasterData/data/master/store
2024-12-13T17:17:14,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741829_1005 (size=34)
2024-12-13T17:17:14,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741829_1005 (size=34)
2024-12-13T17:17:14,717 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
2024-12-13T17:17:14,718 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:17:14,719 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-13T17:17:14,719 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-13T17:17:14,719 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-13T17:17:14,719 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-13T17:17:14,720 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-13T17:17:14,720 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-13T17:17:14,720 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-13T17:17:14,722 WARN [master/bcc0a2319104:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/MasterData/data/master/store/.initializing
2024-12-13T17:17:14,722 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/MasterData/WALs/bcc0a2319104,40387,1734110233611
2024-12-13T17:17:14,735 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bcc0a2319104%2C40387%2C1734110233611, suffix=, logDir=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/MasterData/WALs/bcc0a2319104,40387,1734110233611, archiveDir=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/MasterData/oldWALs, maxLogs=10
2024-12-13T17:17:14,744 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C40387%2C1734110233611.1734110234741
2024-12-13T17:17:14,745 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] util.CommonFSUtils$DfsBuilderUtility(752): Using builder API via reflection for DFS file creation replicate flag.
2024-12-13T17:17:14,745 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] util.CommonFSUtils$DfsBuilderUtility(762): Using builder API via reflection for DFS file creation noLocalWrite flag.
2024-12-13T17:17:14,762 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/MasterData/WALs/bcc0a2319104,40387,1734110233611/bcc0a2319104%2C40387%2C1734110233611.1734110234741 2024-12-13T17:17:14,769 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41043:41043),(127.0.0.1/127.0.0.1:33981:33981)] 2024-12-13T17:17:14,769 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-13T17:17:14,770 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T17:17:14,773 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-13T17:17:14,775 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-13T17:17:14,817 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-13T17:17:14,848 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-13T17:17:14,853 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T17:17:14,855 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T17:17:14,856 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-13T17:17:14,861 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size 
[minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-13T17:17:14,861 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T17:17:14,862 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T17:17:14,863 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-13T17:17:14,867 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-13T17:17:14,867 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T17:17:14,868 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T17:17:14,869 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-13T17:17:14,872 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-13T17:17:14,872 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T17:17:14,874 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T17:17:14,878 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-13T17:17:14,879 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-13T17:17:14,890 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
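
The FlushLargeStoresPolicy entry above falls back to the region's memstore flush size divided by the family count because the table descriptor carries no explicit lower bound. A sketch of how that per-column-family flush bound could be set on a descriptor, assuming the HBase 2.x TableDescriptorBuilder API; the table name 'demo', the 'info' family, and the 16 MB bound are illustrative, not taken from this run.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushBoundSketch {
        public static TableDescriptor build() {
            // Setting the bound in the descriptor avoids the
            // "No hbase.hregion.percolumnfamilyflush.size.lower.bound set"
            // fallback logged above. 16 MB is an illustrative value.
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                          String.valueOf(16L * 1024 * 1024))
                .build();
        }
    }
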
2024-12-13T17:17:14,894 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-12-13T17:17:14,898 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-13T17:17:14,899 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=748101, jitterRate=-0.048740580677986145}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-12-13T17:17:14,903 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-13T17:17:14,905 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-12-13T17:17:14,935 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@240c1c8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-13T17:17:14,966 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating...
2024-12-13T17:17:14,979 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-12-13T17:17:14,979 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-12-13T17:17:14,982 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-12-13T17:17:14,983 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec
2024-12-13T17:17:14,989 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 5 msec
2024-12-13T17:17:14,989 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-12-13T17:17:15,015 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
2024-12-13T17:17:15,025 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-12-13T17:17:15,031 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false
2024-12-13T17:17:15,033 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-12-13T17:17:15,035 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-12-13T17:17:15,039 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false
2024-12-13T17:17:15,041 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-12-13T17:17:15,045 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-12-13T17:17:15,054 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false
2024-12-13T17:17:15,056 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-12-13T17:17:15,065 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false
2024-12-13T17:17:15,074 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-12-13T17:17:15,081 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-12-13T17:17:15,090 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35469-0x10020655d730001, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-13T17:17:15,090 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-13T17:17:15,090 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35469-0x10020655d730001, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:17:15,090 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:17:15,090 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=bcc0a2319104,40387,1734110233611, sessionid=0x10020655d730000, setting cluster-up flag (Was=false)
2024-12-13T17:17:15,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:17:15,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35469-0x10020655d730001, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:17:15,148 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-12-13T17:17:15,150 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bcc0a2319104,40387,1734110233611
2024-12-13T17:17:15,173 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35469-0x10020655d730001, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:17:15,173 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:17:15,213 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-12-13T17:17:15,215 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bcc0a2319104,40387,1734110233611
2024-12-13T17:17:15,317 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta
2024-12-13T17:17:15,320 DEBUG [RS:0;bcc0a2319104:35469 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;bcc0a2319104:35469
2024-12-13T17:17:15,323 INFO [RS:0;bcc0a2319104:35469 {}] regionserver.HRegionServer(1008): ClusterId : 21ebfb4e-ef65-459e-abac-8424e0e46266
2024-12-13T17:17:15,326 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2
2024-12-13T17:17:15,326 DEBUG [RS:0;bcc0a2319104:35469 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-12-13T17:17:15,329 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
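
The balancer entries above record slop=0.2 and the StochasticLoadBalancer cost-function load-out. If the slop needed changing, a sketch like the following could do it, assuming hbase.regions.slop is the property the balancer reads (0.3 below is an illustrative value, not one from this run).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerSlopSketch {
        public static Configuration create() {
            Configuration conf = HBaseConfiguration.create();
            // slop is the tolerated deviation from the average region count
            // before the balancer acts; 0.2 is the value logged above.
            conf.setFloat("hbase.regions.slop", 0.3f); // illustrative
            return conf;
        }
    }
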
2024-12-13T17:17:15,335 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: bcc0a2319104,40387,1734110233611 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
2024-12-13T17:17:15,339 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/bcc0a2319104:0, corePoolSize=5, maxPoolSize=5
2024-12-13T17:17:15,340 DEBUG [RS:0;bcc0a2319104:35469 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-12-13T17:17:15,340 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/bcc0a2319104:0, corePoolSize=5, maxPoolSize=5
2024-12-13T17:17:15,340 DEBUG [RS:0;bcc0a2319104:35469 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-12-13T17:17:15,340 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/bcc0a2319104:0, corePoolSize=5, maxPoolSize=5
2024-12-13T17:17:15,340 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/bcc0a2319104:0, corePoolSize=5, maxPoolSize=5
2024-12-13T17:17:15,340 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/bcc0a2319104:0, corePoolSize=10, maxPoolSize=10
2024-12-13T17:17:15,341 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:17:15,341 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/bcc0a2319104:0, corePoolSize=2, maxPoolSize=2
2024-12-13T17:17:15,341 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:17:15,342 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1734110265342
2024-12-13T17:17:15,344 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-12-13T17:17:15,345 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta
2024-12-13T17:17:15,345 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-12-13T17:17:15,346 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region
2024-12-13T17:17:15,349 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-12-13T17:17:15,350 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:17:15,350 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-12-13T17:17:15,350 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-13T17:17:15,351 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-12-13T17:17:15,351 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-12-13T17:17:15,351 DEBUG [RS:0;bcc0a2319104:35469 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-12-13T17:17:15,352 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-13T17:17:15,352 DEBUG [RS:0;bcc0a2319104:35469 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@398cbceb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-13T17:17:15,354 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-12-13T17:17:15,354 DEBUG [RS:0;bcc0a2319104:35469 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39e02e9b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bcc0a2319104/172.17.0.2:0
2024-12-13T17:17:15,355 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-12-13T17:17:15,356 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-12-13T17:17:15,358 INFO [RS:0;bcc0a2319104:35469 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled
2024-12-13T17:17:15,359 INFO [RS:0;bcc0a2319104:35469 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled
2024-12-13T17:17:15,359 DEBUG [RS:0;bcc0a2319104:35469 {}] regionserver.HRegionServer(1090): About to register with Master.
2024-12-13T17:17:15,360 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-12-13T17:17:15,361 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-12-13T17:17:15,361 INFO [RS:0;bcc0a2319104:35469 {}] regionserver.HRegionServer(3073): reportForDuty to master=bcc0a2319104,40387,1734110233611 with isa=bcc0a2319104/172.17.0.2:35469, startcode=1734110234341
2024-12-13T17:17:15,363 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.large.0-1734110235362,5,FailOnTimeoutGroup]
2024-12-13T17:17:15,363 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.small.0-1734110235363,5,FailOnTimeoutGroup]
2024-12-13T17:17:15,363 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-13T17:17:15,364 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-12-13T17:17:15,365 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-12-13T17:17:15,365 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
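
Among the chore entries above, HMaster(1680) points at another tunable: reopening regions with very high store-file reference counts stays off until hbase.regions.recovery.store.file.ref.count is positive. A minimal sketch of turning it on, with 3 as a purely illustrative threshold:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileRefCountSketch {
        public static Configuration create() {
            Configuration conf = HBaseConfiguration.create();
            // > 0 enables reopening regions whose store files hold very high
            // reference counts, per the log message above. 3 is illustrative.
            conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
            return conf;
        }
    }
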
2024-12-13T17:17:15,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741831_1007 (size=1039)
2024-12-13T17:17:15,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741831_1007 (size=1039)
2024-12-13T17:17:15,368 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039
2024-12-13T17:17:15,369 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e
2024-12-13T17:17:15,376 DEBUG [RS:0;bcc0a2319104:35469 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-13T17:17:15,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741832_1008 (size=32)
2024-12-13T17:17:15,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741832_1008 (size=32)
2024-12-13T17:17:15,389 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:17:15,394 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-13T17:17:15,397 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-13T17:17:15,397 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:17:15,398 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-13T17:17:15,398 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-13T17:17:15,401 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-13T17:17:15,401 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:17:15,405 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-13T17:17:15,406 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-13T17:17:15,409 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-13T17:17:15,409 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:17:15,411 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-13T17:17:15,413 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/hbase/meta/1588230740
2024-12-13T17:17:15,414 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/hbase/meta/1588230740
2024-12-13T17:17:15,418 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-13T17:17:15,423 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-13T17:17:15,431 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-13T17:17:15,435 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=742663, jitterRate=-0.05565527081489563}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-13T17:17:15,438 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-12-13T17:17:15,438 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-13T17:17:15,438 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-13T17:17:15,438 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-13T17:17:15,439 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-13T17:17:15,439 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-13T17:17:15,440 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-13T17:17:15,440 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-13T17:17:15,443 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta
2024-12-13T17:17:15,443 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta
2024-12-13T17:17:15,449 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-12-13T17:17:15,458 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-13T17:17:15,461 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-12-13T17:17:15,464 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35315, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService
2024-12-13T17:17:15,470 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40387 {}] master.ServerManager(332): Checking decommissioned status of RegionServer bcc0a2319104,35469,1734110234341
2024-12-13T17:17:15,472 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40387 {}] master.ServerManager(486): Registering regionserver=bcc0a2319104,35469,1734110234341
2024-12-13T17:17:15,488 DEBUG [RS:0;bcc0a2319104:35469 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e
2024-12-13T17:17:15,488 DEBUG [RS:0;bcc0a2319104:35469 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:32953
2024-12-13T17:17:15,488 DEBUG [RS:0;bcc0a2319104:35469 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1
2024-12-13T17:17:15,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-13T17:17:15,498 DEBUG [RS:0;bcc0a2319104:35469 {}] zookeeper.ZKUtil(111): regionserver:35469-0x10020655d730001, quorum=127.0.0.1:62632, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/bcc0a2319104,35469,1734110234341
2024-12-13T17:17:15,498 WARN [RS:0;bcc0a2319104:35469 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
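
The ZNodeClearer warning above fires because the HBASE_ZNODE_FILE environment variable is absent, so a crashed process's ephemeral znode cannot be pre-cleared by the start scripts. A small standalone check of the same condition, using only java.lang.System; the variable name comes straight from the warning:

    public class ZNodeFileCheck {
        public static void main(String[] args) {
            // ZNodeClearer logs the warning above when this variable is unset.
            String znodeFile = System.getenv("HBASE_ZNODE_FILE");
            System.out.println(znodeFile == null
                ? "HBASE_ZNODE_FILE not set; znodes will not be cleared on crash"
                : "znode file: " + znodeFile);
        }
    }
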
2024-12-13T17:17:15,499 INFO [RS:0;bcc0a2319104:35469 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-13T17:17:15,499 DEBUG [RS:0;bcc0a2319104:35469 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341
2024-12-13T17:17:15,501 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [bcc0a2319104,35469,1734110234341]
2024-12-13T17:17:15,511 DEBUG [RS:0;bcc0a2319104:35469 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds
2024-12-13T17:17:15,520 INFO [RS:0;bcc0a2319104:35469 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-12-13T17:17:15,529 INFO [RS:0;bcc0a2319104:35469 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-12-13T17:17:15,531 INFO [RS:0;bcc0a2319104:35469 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-13T17:17:15,531 INFO [RS:0;bcc0a2319104:35469 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-13T17:17:15,532 INFO [RS:0;bcc0a2319104:35469 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S
2024-12-13T17:17:15,537 INFO [RS:0;bcc0a2319104:35469 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-12-13T17:17:15,537 DEBUG [RS:0;bcc0a2319104:35469 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:17:15,537 DEBUG [RS:0;bcc0a2319104:35469 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:17:15,538 DEBUG [RS:0;bcc0a2319104:35469 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:17:15,538 DEBUG [RS:0;bcc0a2319104:35469 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:17:15,538 DEBUG [RS:0;bcc0a2319104:35469 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:17:15,538 DEBUG [RS:0;bcc0a2319104:35469 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/bcc0a2319104:0, corePoolSize=2, maxPoolSize=2
2024-12-13T17:17:15,538 DEBUG [RS:0;bcc0a2319104:35469 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:17:15,538 DEBUG [RS:0;bcc0a2319104:35469 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:17:15,538 DEBUG [RS:0;bcc0a2319104:35469 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:17:15,538 DEBUG [RS:0;bcc0a2319104:35469 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:17:15,538 DEBUG [RS:0;bcc0a2319104:35469 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:17:15,538 DEBUG [RS:0;bcc0a2319104:35469 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/bcc0a2319104:0, corePoolSize=3, maxPoolSize=3
2024-12-13T17:17:15,539 DEBUG [RS:0;bcc0a2319104:35469 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0, corePoolSize=3, maxPoolSize=3
2024-12-13T17:17:15,539 INFO [RS:0;bcc0a2319104:35469 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-12-13T17:17:15,539 INFO [RS:0;bcc0a2319104:35469 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-12-13T17:17:15,539 INFO [RS:0;bcc0a2319104:35469 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-12-13T17:17:15,539 INFO [RS:0;bcc0a2319104:35469 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-12-13T17:17:15,540 INFO [RS:0;bcc0a2319104:35469 {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,35469,1734110234341-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-13T17:17:15,554 INFO [RS:0;bcc0a2319104:35469 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-12-13T17:17:15,555 INFO [RS:0;bcc0a2319104:35469 {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,35469,1734110234341-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-13T17:17:15,570 INFO [RS:0;bcc0a2319104:35469 {}] regionserver.Replication(204): bcc0a2319104,35469,1734110234341 started
2024-12-13T17:17:15,570 INFO [RS:0;bcc0a2319104:35469 {}] regionserver.HRegionServer(1767): Serving as bcc0a2319104,35469,1734110234341, RpcServer on bcc0a2319104/172.17.0.2:35469, sessionid=0x10020655d730001
2024-12-13T17:17:15,571 DEBUG [RS:0;bcc0a2319104:35469 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-12-13T17:17:15,571 DEBUG [RS:0;bcc0a2319104:35469 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager bcc0a2319104,35469,1734110234341
2024-12-13T17:17:15,571 DEBUG [RS:0;bcc0a2319104:35469 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bcc0a2319104,35469,1734110234341'
2024-12-13T17:17:15,571 DEBUG [RS:0;bcc0a2319104:35469 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-12-13T17:17:15,572 DEBUG [RS:0;bcc0a2319104:35469 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-12-13T17:17:15,572 DEBUG [RS:0;bcc0a2319104:35469 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-12-13T17:17:15,572 DEBUG [RS:0;bcc0a2319104:35469 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-12-13T17:17:15,572 DEBUG [RS:0;bcc0a2319104:35469 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager bcc0a2319104,35469,1734110234341
2024-12-13T17:17:15,572 DEBUG [RS:0;bcc0a2319104:35469 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bcc0a2319104,35469,1734110234341'
2024-12-13T17:17:15,572 DEBUG [RS:0;bcc0a2319104:35469 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-12-13T17:17:15,573 DEBUG [RS:0;bcc0a2319104:35469 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-13T17:17:15,573 DEBUG [RS:0;bcc0a2319104:35469 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-13T17:17:15,574 INFO [RS:0;bcc0a2319104:35469 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-13T17:17:15,574 INFO [RS:0;bcc0a2319104:35469 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-12-13T17:17:15,612 WARN [bcc0a2319104:40387 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions.
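
The two quota-manager entries above show both RPC and space quotas off, which is the default. If quotas were wanted, the usual switch is hbase.quota.enabled (an assumption here; the property name is not shown in this log). A sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaEnableSketch {
        public static Configuration create() {
            Configuration conf = HBaseConfiguration.create();
            // Assumed property name; with it set to true, the quota managers
            // start instead of logging "Quota support disabled" as above.
            conf.setBoolean("hbase.quota.enabled", true);
            return conf;
        }
    }
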
2024-12-13T17:17:15,690 INFO [RS:0;bcc0a2319104:35469 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bcc0a2319104%2C35469%2C1734110234341, suffix=, logDir=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341, archiveDir=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/oldWALs, maxLogs=32
2024-12-13T17:17:15,692 INFO [RS:0;bcc0a2319104:35469 {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C35469%2C1734110234341.1734110235692
2024-12-13T17:17:15,700 INFO [RS:0;bcc0a2319104:35469 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110235692
2024-12-13T17:17:15,701 DEBUG [RS:0;bcc0a2319104:35469 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33981:33981),(127.0.0.1/127.0.0.1:41043:41043)]
2024-12-13T17:17:15,863 DEBUG [bcc0a2319104:40387 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-12-13T17:17:15,870 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=bcc0a2319104,35469,1734110234341
2024-12-13T17:17:15,876 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bcc0a2319104,35469,1734110234341, state=OPENING
2024-12-13T17:17:15,889 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-12-13T17:17:15,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35469-0x10020655d730001, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:17:15,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:17:15,905 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-13T17:17:15,905 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-13T17:17:15,908 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=bcc0a2319104,35469,1734110234341}]
2024-12-13T17:17:16,088 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bcc0a2319104,35469,1734110234341
2024-12-13T17:17:16,089 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-13T17:17:16,092 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56322, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-13T17:17:16,101 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740
2024-12-13T17:17:16,102 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-13T17:17:16,105 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bcc0a2319104%2C35469%2C1734110234341.meta, suffix=.meta, logDir=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341, archiveDir=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/oldWALs, maxLogs=32
2024-12-13T17:17:16,107 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C35469%2C1734110234341.meta.1734110236106.meta
2024-12-13T17:17:16,114 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.meta.1734110236106.meta
2024-12-13T17:17:16,115 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41043:41043),(127.0.0.1/127.0.0.1:33981:33981)]
2024-12-13T17:17:16,115 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-12-13T17:17:16,117 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-12-13T17:17:16,166 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-12-13T17:17:16,171 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
2024-12-13T17:17:16,176 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-12-13T17:17:16,177 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:17:16,177 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740
2024-12-13T17:17:16,177 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740
2024-12-13T17:17:16,180 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-13T17:17:16,182 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-13T17:17:16,182 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:17:16,183 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-13T17:17:16,184 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-13T17:17:16,186 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-13T17:17:16,186 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:17:16,187 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-13T17:17:16,188 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-13T17:17:16,189 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-13T17:17:16,189 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:17:16,190 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-13T17:17:16,192 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/hbase/meta/1588230740
2024-12-13T17:17:16,195 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/hbase/meta/1588230740
2024-12-13T17:17:16,197 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-13T17:17:16,200 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-13T17:17:16,201 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=815689, jitterRate=0.03720220923423767}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-13T17:17:16,202 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-12-13T17:17:16,208 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1734110236081
2024-12-13T17:17:16,218 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740
2024-12-13T17:17:16,218 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740
2024-12-13T17:17:16,219 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=bcc0a2319104,35469,1734110234341
2024-12-13T17:17:16,221 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bcc0a2319104,35469,1734110234341, state=OPEN
2024-12-13T17:17:16,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-13T17:17:16,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35469-0x10020655d730001, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-13T17:17:16,257 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-13T17:17:16,257 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-13T17:17:16,261 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2
2024-12-13T17:17:16,262 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=bcc0a2319104,35469,1734110234341 in 349 msec
2024-12-13T17:17:16,268 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1
2024-12-13T17:17:16,268 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 814 msec
2024-12-13T17:17:16,274 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.0080 sec
2024-12-13T17:17:16,275 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers
to report in: status=status unset, state=RUNNING, startTime=1734110236275, completionTime=-1 2024-12-13T17:17:16,275 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-13T17:17:16,275 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-13T17:17:16,311 DEBUG [hconnection-0x64e99096-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T17:17:16,313 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56334, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T17:17:16,321 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-13T17:17:16,321 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1734110296321 2024-12-13T17:17:16,321 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1734110356321 2024-12-13T17:17:16,321 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 45 msec 2024-12-13T17:17:16,406 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,40387,1734110233611-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-13T17:17:16,407 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,40387,1734110233611-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-13T17:17:16,407 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,40387,1734110233611-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-13T17:17:16,409 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-bcc0a2319104:40387, period=300000, unit=MILLISECONDS is enabled. 2024-12-13T17:17:16,409 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-13T17:17:16,416 DEBUG [master/bcc0a2319104:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-13T17:17:16,418 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-12-13T17:17:16,420 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-13T17:17:16,424 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-13T17:17:16,427 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T17:17:16,428 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T17:17:16,429 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T17:17:16,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741835_1011 (size=358) 2024-12-13T17:17:16,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741835_1011 (size=358) 2024-12-13T17:17:16,444 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 2a575812d6ea9a5ddbc77c5d1bb7a309, NAME => 'hbase:namespace,,1734110236419.2a575812d6ea9a5ddbc77c5d1bb7a309.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e 2024-12-13T17:17:16,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741836_1012 (size=42) 2024-12-13T17:17:16,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741836_1012 (size=42) 2024-12-13T17:17:16,454 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734110236419.2a575812d6ea9a5ddbc77c5d1bb7a309.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T17:17:16,455 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 2a575812d6ea9a5ddbc77c5d1bb7a309, disabling compactions & flushes 2024-12-13T17:17:16,455 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734110236419.2a575812d6ea9a5ddbc77c5d1bb7a309. 
2024-12-13T17:17:16,455 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734110236419.2a575812d6ea9a5ddbc77c5d1bb7a309. 2024-12-13T17:17:16,455 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734110236419.2a575812d6ea9a5ddbc77c5d1bb7a309. after waiting 0 ms 2024-12-13T17:17:16,455 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734110236419.2a575812d6ea9a5ddbc77c5d1bb7a309. 2024-12-13T17:17:16,455 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1734110236419.2a575812d6ea9a5ddbc77c5d1bb7a309. 2024-12-13T17:17:16,455 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 2a575812d6ea9a5ddbc77c5d1bb7a309: 2024-12-13T17:17:16,457 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T17:17:16,463 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1734110236419.2a575812d6ea9a5ddbc77c5d1bb7a309.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1734110236458"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734110236458"}]},"ts":"1734110236458"} 2024-12-13T17:17:16,487 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-13T17:17:16,489 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T17:17:16,492 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734110236489"}]},"ts":"1734110236489"} 2024-12-13T17:17:16,497 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-13T17:17:16,557 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=2a575812d6ea9a5ddbc77c5d1bb7a309, ASSIGN}] 2024-12-13T17:17:16,560 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=2a575812d6ea9a5ddbc77c5d1bb7a309, ASSIGN 2024-12-13T17:17:16,561 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=2a575812d6ea9a5ddbc77c5d1bb7a309, ASSIGN; state=OFFLINE, location=bcc0a2319104,35469,1734110234341; forceNewPlan=false, retain=false 2024-12-13T17:17:16,712 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=2a575812d6ea9a5ddbc77c5d1bb7a309, regionState=OPENING, regionLocation=bcc0a2319104,35469,1734110234341 2024-12-13T17:17:16,718 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; 
OpenRegionProcedure 2a575812d6ea9a5ddbc77c5d1bb7a309, server=bcc0a2319104,35469,1734110234341}] 2024-12-13T17:17:16,872 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bcc0a2319104,35469,1734110234341 2024-12-13T17:17:16,878 INFO [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1734110236419.2a575812d6ea9a5ddbc77c5d1bb7a309. 2024-12-13T17:17:16,879 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 2a575812d6ea9a5ddbc77c5d1bb7a309, NAME => 'hbase:namespace,,1734110236419.2a575812d6ea9a5ddbc77c5d1bb7a309.', STARTKEY => '', ENDKEY => ''} 2024-12-13T17:17:16,879 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 2a575812d6ea9a5ddbc77c5d1bb7a309 2024-12-13T17:17:16,879 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734110236419.2a575812d6ea9a5ddbc77c5d1bb7a309.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T17:17:16,879 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 2a575812d6ea9a5ddbc77c5d1bb7a309 2024-12-13T17:17:16,879 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 2a575812d6ea9a5ddbc77c5d1bb7a309 2024-12-13T17:17:16,882 INFO [StoreOpener-2a575812d6ea9a5ddbc77c5d1bb7a309-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 2a575812d6ea9a5ddbc77c5d1bb7a309 2024-12-13T17:17:16,884 INFO [StoreOpener-2a575812d6ea9a5ddbc77c5d1bb7a309-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2a575812d6ea9a5ddbc77c5d1bb7a309 columnFamilyName info 2024-12-13T17:17:16,884 DEBUG [StoreOpener-2a575812d6ea9a5ddbc77c5d1bb7a309-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T17:17:16,885 INFO [StoreOpener-2a575812d6ea9a5ddbc77c5d1bb7a309-1 {}] regionserver.HStore(327): Store=2a575812d6ea9a5ddbc77c5d1bb7a309/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, 
compression=NONE 2024-12-13T17:17:16,886 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/hbase/namespace/2a575812d6ea9a5ddbc77c5d1bb7a309 2024-12-13T17:17:16,887 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/hbase/namespace/2a575812d6ea9a5ddbc77c5d1bb7a309 2024-12-13T17:17:16,890 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 2a575812d6ea9a5ddbc77c5d1bb7a309 2024-12-13T17:17:16,893 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/hbase/namespace/2a575812d6ea9a5ddbc77c5d1bb7a309/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T17:17:16,894 INFO [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 2a575812d6ea9a5ddbc77c5d1bb7a309; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=863006, jitterRate=0.09737010300159454}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T17:17:16,895 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 2a575812d6ea9a5ddbc77c5d1bb7a309: 2024-12-13T17:17:16,897 INFO [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1734110236419.2a575812d6ea9a5ddbc77c5d1bb7a309., pid=6, masterSystemTime=1734110236872 2024-12-13T17:17:16,900 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1734110236419.2a575812d6ea9a5ddbc77c5d1bb7a309. 2024-12-13T17:17:16,900 INFO [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1734110236419.2a575812d6ea9a5ddbc77c5d1bb7a309. 
2024-12-13T17:17:16,901 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=2a575812d6ea9a5ddbc77c5d1bb7a309, regionState=OPEN, openSeqNum=2, regionLocation=bcc0a2319104,35469,1734110234341 2024-12-13T17:17:16,908 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-13T17:17:16,909 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 2a575812d6ea9a5ddbc77c5d1bb7a309, server=bcc0a2319104,35469,1734110234341 in 186 msec 2024-12-13T17:17:16,911 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-13T17:17:16,911 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=2a575812d6ea9a5ddbc77c5d1bb7a309, ASSIGN in 351 msec 2024-12-13T17:17:16,912 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T17:17:16,913 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734110236912"}]},"ts":"1734110236912"} 2024-12-13T17:17:16,915 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-13T17:17:16,930 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-13T17:17:16,931 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T17:17:16,934 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 511 msec 2024-12-13T17:17:16,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35469-0x10020655d730001, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:17:16,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-13T17:17:16,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:17:16,981 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-13T17:17:17,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-13T17:17:17,053 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 90 msec 2024-12-13T17:17:17,066 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-13T17:17:17,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-13T17:17:17,158 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 92 msec 2024-12-13T17:17:17,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-13T17:17:17,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-13T17:17:17,305 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 2.830sec 2024-12-13T17:17:17,306 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-13T17:17:17,309 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-13T17:17:17,311 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-13T17:17:17,312 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-13T17:17:17,312 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-13T17:17:17,313 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,40387,1734110233611-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-13T17:17:17,313 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,40387,1734110233611-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-13T17:17:17,319 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-13T17:17:17,320 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-13T17:17:17,320 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,40387,1734110233611-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-13T17:17:17,325 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5c22e283 to 127.0.0.1:62632 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1a869e3d 2024-12-13T17:17:17,325 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-13T17:17:17,402 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60d82939, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T17:17:17,406 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-13T17:17:17,406 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-13T17:17:17,417 DEBUG [hconnection-0x4790de12-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T17:17:17,456 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56346, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T17:17:17,463 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=bcc0a2319104,40387,1734110233611 2024-12-13T17:17:17,464 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T17:17:17,470 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-13T17:17:17,478 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-13T17:17:17,480 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43800, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-13T17:17:17,488 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40387 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-13T17:17:17,488 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40387 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
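The two TableDescriptorChecker warnings above show that the test deliberately shrinks "hbase.hregion.max.filesize" to 786432 bytes and "hbase.hregion.memstore.flush.size" to 8192 bytes so that flushes and split checks fire quickly during the run. A sketch of a table descriptor that would trip the same checks, using the standard HBase 2.x client builder API (the builder calls are ordinary public API, not something extracted from this log):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public final class TinyTableDescriptor {
        public static void main(String[] args) {
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                .setMaxFileSize(786432L)      // trips the MAX_FILESIZE "too small" warning
                .setMemStoreFlushSize(8192L)  // trips the MEMSTORE_FLUSHSIZE "too small" warning
                .build();
            System.out.println(td);
        }
    }

Submitting a descriptor like this through Admin.createTable would kick off a CreateTableProcedure sequence like the one the next lines trace (pid=9).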
2024-12-13T17:17:17,490 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40387 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T17:17:17,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40387 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-12-13T17:17:17,494 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T17:17:17,494 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T17:17:17,496 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T17:17:17,496 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40387 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 9 2024-12-13T17:17:17,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40387 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-13T17:17:17,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741837_1013 (size=389) 2024-12-13T17:17:17,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741837_1013 (size=389) 2024-12-13T17:17:17,509 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 82c8bc6c6af3232621b8c9cc7d7e8dc3, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1734110237487.82c8bc6c6af3232621b8c9cc7d7e8dc3.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e 2024-12-13T17:17:17,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741838_1014 (size=72) 2024-12-13T17:17:17,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741838_1014 (size=72) 2024-12-13T17:17:17,520 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 
{}] regionserver.HRegion(894): Instantiated TestLogRolling-testSlowSyncLogRolling,,1734110237487.82c8bc6c6af3232621b8c9cc7d7e8dc3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T17:17:17,520 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1681): Closing 82c8bc6c6af3232621b8c9cc7d7e8dc3, disabling compactions & flushes 2024-12-13T17:17:17,520 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testSlowSyncLogRolling,,1734110237487.82c8bc6c6af3232621b8c9cc7d7e8dc3. 2024-12-13T17:17:17,520 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testSlowSyncLogRolling,,1734110237487.82c8bc6c6af3232621b8c9cc7d7e8dc3. 2024-12-13T17:17:17,520 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1734110237487.82c8bc6c6af3232621b8c9cc7d7e8dc3. after waiting 0 ms 2024-12-13T17:17:17,520 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1734110237487.82c8bc6c6af3232621b8c9cc7d7e8dc3. 2024-12-13T17:17:17,520 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testSlowSyncLogRolling,,1734110237487.82c8bc6c6af3232621b8c9cc7d7e8dc3. 2024-12-13T17:17:17,520 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1635): Region close journal for 82c8bc6c6af3232621b8c9cc7d7e8dc3: 2024-12-13T17:17:17,522 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T17:17:17,522 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1734110237487.82c8bc6c6af3232621b8c9cc7d7e8dc3.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1734110237522"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734110237522"}]},"ts":"1734110237522"} 2024-12-13T17:17:17,525 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-13T17:17:17,526 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T17:17:17,527 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734110237526"}]},"ts":"1734110237526"} 2024-12-13T17:17:17,529 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-12-13T17:17:17,613 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=82c8bc6c6af3232621b8c9cc7d7e8dc3, ASSIGN}] 2024-12-13T17:17:17,617 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=82c8bc6c6af3232621b8c9cc7d7e8dc3, ASSIGN 2024-12-13T17:17:17,621 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=82c8bc6c6af3232621b8c9cc7d7e8dc3, ASSIGN; state=OFFLINE, location=bcc0a2319104,35469,1734110234341; forceNewPlan=false, retain=false 2024-12-13T17:17:17,771 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=82c8bc6c6af3232621b8c9cc7d7e8dc3, regionState=OPENING, regionLocation=bcc0a2319104,35469,1734110234341 2024-12-13T17:17:17,776 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 82c8bc6c6af3232621b8c9cc7d7e8dc3, server=bcc0a2319104,35469,1734110234341}] 2024-12-13T17:17:17,931 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bcc0a2319104,35469,1734110234341 2024-12-13T17:17:17,944 INFO [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testSlowSyncLogRolling,,1734110237487.82c8bc6c6af3232621b8c9cc7d7e8dc3. 
2024-12-13T17:17:17,945 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 82c8bc6c6af3232621b8c9cc7d7e8dc3, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1734110237487.82c8bc6c6af3232621b8c9cc7d7e8dc3.', STARTKEY => '', ENDKEY => ''} 2024-12-13T17:17:17,945 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 82c8bc6c6af3232621b8c9cc7d7e8dc3 2024-12-13T17:17:17,946 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testSlowSyncLogRolling,,1734110237487.82c8bc6c6af3232621b8c9cc7d7e8dc3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T17:17:17,946 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 82c8bc6c6af3232621b8c9cc7d7e8dc3 2024-12-13T17:17:17,946 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 82c8bc6c6af3232621b8c9cc7d7e8dc3 2024-12-13T17:17:17,950 INFO [StoreOpener-82c8bc6c6af3232621b8c9cc7d7e8dc3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 82c8bc6c6af3232621b8c9cc7d7e8dc3 2024-12-13T17:17:17,952 INFO [StoreOpener-82c8bc6c6af3232621b8c9cc7d7e8dc3-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 82c8bc6c6af3232621b8c9cc7d7e8dc3 columnFamilyName info 2024-12-13T17:17:17,953 DEBUG [StoreOpener-82c8bc6c6af3232621b8c9cc7d7e8dc3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T17:17:17,954 INFO [StoreOpener-82c8bc6c6af3232621b8c9cc7d7e8dc3-1 {}] regionserver.HStore(327): Store=82c8bc6c6af3232621b8c9cc7d7e8dc3/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T17:17:17,956 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3 2024-12-13T17:17:17,957 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] 
regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3 2024-12-13T17:17:17,961 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 82c8bc6c6af3232621b8c9cc7d7e8dc3 2024-12-13T17:17:17,965 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T17:17:17,966 INFO [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 82c8bc6c6af3232621b8c9cc7d7e8dc3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=765390, jitterRate=-0.026756733655929565}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T17:17:17,967 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 82c8bc6c6af3232621b8c9cc7d7e8dc3: 2024-12-13T17:17:17,968 INFO [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1734110237487.82c8bc6c6af3232621b8c9cc7d7e8dc3., pid=11, masterSystemTime=1734110237931 2024-12-13T17:17:17,972 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testSlowSyncLogRolling,,1734110237487.82c8bc6c6af3232621b8c9cc7d7e8dc3. 2024-12-13T17:17:17,972 INFO [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testSlowSyncLogRolling,,1734110237487.82c8bc6c6af3232621b8c9cc7d7e8dc3. 
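The "Opened 82c8bc6c6af3232621b8c9cc7d7e8dc3" record above reports desiredMaxFileSize=765390 with jitterRate=-0.026756733655929565; combined with the 786432-byte MAX_FILESIZE from the earlier warning, this is consistent with the split policy jittering the configured maximum as desiredMaxFileSize = maxFileSize + (long)(maxFileSize * jitterRate). A quick arithmetic check (the formula is inferred from the numbers in this log, not quoted from HBase source); the same relation reproduces the 815689 and 863006 values reported for the meta and namespace regions earlier:

    public final class SplitJitterCheck {
        public static void main(String[] args) {
            long maxFileSize = 786432L;                 // MAX_FILESIZE from the earlier warning
            double jitterRate = -0.026756733655929565;  // jitterRate from the record above
            long desired = maxFileSize + (long) (maxFileSize * jitterRate);
            System.out.println(desired); // prints 765390, matching desiredMaxFileSize above
        }
    }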
2024-12-13T17:17:17,972 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=82c8bc6c6af3232621b8c9cc7d7e8dc3, regionState=OPEN, openSeqNum=2, regionLocation=bcc0a2319104,35469,1734110234341 2024-12-13T17:17:17,980 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-13T17:17:17,982 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 82c8bc6c6af3232621b8c9cc7d7e8dc3, server=bcc0a2319104,35469,1734110234341 in 200 msec 2024-12-13T17:17:17,985 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-13T17:17:17,985 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=82c8bc6c6af3232621b8c9cc7d7e8dc3, ASSIGN in 368 msec 2024-12-13T17:17:17,986 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T17:17:17,987 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734110237986"}]},"ts":"1734110237986"} 2024-12-13T17:17:17,990 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-12-13T17:17:18,033 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T17:17:18,036 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 543 msec 2024-12-13T17:17:21,688 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-13T17:17:21,743 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-13T17:17:21,744 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-13T17:17:21,745 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-12-13T17:17:24,019 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-13T17:17:24,020 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-13T17:17:24,022 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-13T17:17:24,022 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-13T17:17:24,022 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-13T17:17:24,022 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-13T17:17:24,023 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-13T17:17:24,023 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-13T17:17:24,024 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-13T17:17:24,024 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-13T17:17:27,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40387 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-13T17:17:27,509 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling, procId: 9 completed 2024-12-13T17:17:27,514 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-12-13T17:17:27,515 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1734110237487.82c8bc6c6af3232621b8c9cc7d7e8dc3. 
2024-12-13T17:17:27,516 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C35469%2C1734110234341.1734110247516
2024-12-13T17:17:27,526 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110235692 with entries=4, filesize=947 B; new WAL /user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110247516
2024-12-13T17:17:27,527 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41043:41043),(127.0.0.1/127.0.0.1:33981:33981)]
2024-12-13T17:17:27,527 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110235692 is not closed yet, will try archiving it next time
2024-12-13T17:17:27,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741833_1009 (size=955)
2024-12-13T17:17:27,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741833_1009 (size=955)
2024-12-13T17:17:39,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35469 {}] regionserver.HRegion(8581): Flush requested on 82c8bc6c6af3232621b8c9cc7d7e8dc3
2024-12-13T17:17:39,563 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 82c8bc6c6af3232621b8c9cc7d7e8dc3 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-13T17:17:39,617 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/.tmp/info/57d87141c6574dda8e1f14c5bfd7b6b4 is 1080, key is row0001/info:/1734110247533/Put/seqid=0
2024-12-13T17:17:39,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741840_1016 (size=12509)
2024-12-13T17:17:39,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741840_1016 (size=12509)
2024-12-13T17:17:39,629 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/.tmp/info/57d87141c6574dda8e1f14c5bfd7b6b4
2024-12-13T17:17:39,672 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/.tmp/info/57d87141c6574dda8e1f14c5bfd7b6b4 as hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/info/57d87141c6574dda8e1f14c5bfd7b6b4
2024-12-13T17:17:39,683 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/info/57d87141c6574dda8e1f14c5bfd7b6b4, entries=7, sequenceid=11, filesize=12.2 K
2024-12-13T17:17:39,687 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 82c8bc6c6af3232621b8c9cc7d7e8dc3 in 125ms, sequenceid=11, compaction requested=false
2024-12-13T17:17:39,688 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 82c8bc6c6af3232621b8c9cc7d7e8dc3:
2024-12-13T17:17:42,768 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-13T17:17:47,580 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C35469%2C1734110234341.1734110267579
2024-12-13T17:17:47,793 INFO [Time-limited test {}] wal.AbstractFSWAL(1183): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK], DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK]]
2024-12-13T17:17:47,794 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110247516 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110267579
2024-12-13T17:17:47,794 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41043:41043),(127.0.0.1/127.0.0.1:33981:33981)]
2024-12-13T17:17:47,795 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110247516 is not closed yet, will try archiving it next time
2024-12-13T17:17:47,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741839_1015 (size=12399)
2024-12-13T17:17:47,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741839_1015 (size=12399)
2024-12-13T17:17:47,998 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK], DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK]]
2024-12-13T17:17:48,357 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-13T17:17:48,359 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51600, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService
2024-12-13T17:17:50,204 INFO [sync.0 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK], DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK]]
2024-12-13T17:17:52,408 INFO [sync.1 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK], DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK]]
2024-12-13T17:17:54,651 INFO [sync.2 {}] wal.AbstractFSWAL(1183): Slow sync cost: 237 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK], DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK]]
2024-12-13T17:17:54,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35469 {}] regionserver.HRegion(8581): Flush requested on 82c8bc6c6af3232621b8c9cc7d7e8dc3
2024-12-13T17:17:54,651 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 82c8bc6c6af3232621b8c9cc7d7e8dc3 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-13T17:17:54,854 INFO [sync.3 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK], DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK]]
2024-12-13T17:17:54,863 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/.tmp/info/9812220c5b7e4b2b916de4659eedf373 is 1080, key is row0008/info:/1734110261565/Put/seqid=0
2024-12-13T17:17:54,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741842_1018 (size=12509)
2024-12-13T17:17:54,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741842_1018 (size=12509)
2024-12-13T17:17:54,872 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/.tmp/info/9812220c5b7e4b2b916de4659eedf373
2024-12-13T17:17:54,884 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/.tmp/info/9812220c5b7e4b2b916de4659eedf373 as hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/info/9812220c5b7e4b2b916de4659eedf373
2024-12-13T17:17:54,893 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/info/9812220c5b7e4b2b916de4659eedf373, entries=7, sequenceid=21, filesize=12.2 K
2024-12-13T17:17:55,095 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK], DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK]]
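The flush entries above show a two-step commit: HFileWriterImpl writes the new HFile under the region's .tmp directory, and HRegionFileSystem then "commits" it by renaming it into the info/ store directory, so readers never observe a half-written file. A minimal local-filesystem sketch of that write-to-temp-then-rename pattern follows; the class and method names are made-up stand-ins for illustration, not HBase's actual API.

    import java.io.IOException;
    import java.nio.file.*;

    // Simplified analogue of the flush-commit step seen in the log: the flusher
    // writes the new file under <region>/.tmp/<family>/ and, once the write has
    // fully succeeded, moves it into <region>/<family>/ in a single rename.
    public class FlushCommitSketch {
        public static Path commitStoreFile(Path tmpFile, Path storeDir) throws IOException {
            Path dst = storeDir.resolve(tmpFile.getFileName());
            // ATOMIC_MOVE mirrors the rename that the log reports as "Committing ... as ...".
            return Files.move(tmpFile, dst, StandardCopyOption.ATOMIC_MOVE);
        }
    }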
2024-12-13T17:17:55,096 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 82c8bc6c6af3232621b8c9cc7d7e8dc3 in 444ms, sequenceid=21, compaction requested=false 2024-12-13T17:17:55,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 82c8bc6c6af3232621b8c9cc7d7e8dc3: 2024-12-13T17:17:55,096 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=24.4 K, sizeToCheck=16.0 K 2024-12-13T17:17:55,097 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-13T17:17:55,098 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/info/57d87141c6574dda8e1f14c5bfd7b6b4 because midkey is the same as first or last row 2024-12-13T17:17:56,855 INFO [sync.0 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK], DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK]] 2024-12-13T17:17:57,535 INFO [master/bcc0a2319104:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-13T17:17:57,535 INFO [master/bcc0a2319104:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-13T17:17:59,060 WARN [sync.1 {}] wal.AbstractFSWAL(1346): Requesting log roll because we exceeded slow sync threshold; count=7, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK], DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK]] 2024-12-13T17:17:59,061 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog bcc0a2319104%2C35469%2C1734110234341:(num 1734110267579) roll requested 2024-12-13T17:17:59,061 INFO [sync.1 {}] wal.AbstractFSWAL(1183): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK], DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK]] 2024-12-13T17:17:59,062 INFO [regionserver/bcc0a2319104:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C35469%2C1734110234341.1734110279061 2024-12-13T17:17:59,272 INFO [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(1183): Slow sync cost: 208 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK], DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK]] 2024-12-13T17:17:59,473 INFO [sync.2 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK], DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK]] 2024-12-13T17:17:59,475 INFO [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110267579 with entries=8, filesize=7.55 KB; new WAL 
/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110279061 2024-12-13T17:17:59,475 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33981:33981),(127.0.0.1/127.0.0.1:41043:41043)] 2024-12-13T17:17:59,475 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110267579 is not closed yet, will try archiving it next time 2024-12-13T17:17:59,477 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110247516 to hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/oldWALs/bcc0a2319104%2C35469%2C1734110234341.1734110247516 2024-12-13T17:17:59,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741841_1017 (size=7739) 2024-12-13T17:17:59,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741841_1017 (size=7739) 2024-12-13T17:18:01,263 INFO [sync.3 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK], DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK]] 2024-12-13T17:18:02,946 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 82c8bc6c6af3232621b8c9cc7d7e8dc3, had cached 0 bytes from a total of 25018 2024-12-13T17:18:03,466 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK], DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK]] 2024-12-13T17:18:05,670 INFO [sync.0 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK], DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK]] 2024-12-13T17:18:07,873 INFO [sync.1 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK], DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK]] 2024-12-13T17:18:09,877 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T17:18:09,879 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C35469%2C1734110234341.1734110289878 2024-12-13T17:18:12,769 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
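The WARN at wal.AbstractFSWAL(1346) above (count=7, threshold=5) is a count-based roll trigger: enough individually slow syncs accumulate and the WAL is rolled onto a fresh file and pipeline. The entries that follow show a second, time-based trigger, where a single sync taking 5000 ms or more forces a roll on its own. A minimal sketch of those two triggers, assuming field names and the 100 ms "slow" cutoff purely for illustration (only the count=5 and 5000 ms thresholds appear in this log):

    // Illustrative sketch of the two WAL roll triggers visible in this log;
    // not the actual AbstractFSWAL implementation.
    public class SlowSyncRollSketch {
        static final long SLOW_SYNC_MS = 100;         // assumed cutoff for a "slow" sync
        static final int  SLOW_SYNC_COUNT_LIMIT = 5;  // "count=7, threshold=5" in the log
        static final long ROLL_ON_SYNC_MS = 5000;     // "time=5006 ms, threshold=5000 ms" below

        private int slowSyncCount;

        /** Returns true when the WAL should be rolled after a sync completes. */
        boolean onSyncCompleted(long syncCostMs) {
            if (syncCostMs >= ROLL_ON_SYNC_MS) {
                return true;                          // one very slow sync rolls immediately
            }
            if (syncCostMs > SLOW_SYNC_MS && ++slowSyncCount > SLOW_SYNC_COUNT_LIMIT) {
                slowSyncCount = 0;
                return true;                          // too many moderately slow syncs
            }
            return false;
        }
    }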
2024-12-13T17:18:14,888 INFO [Time-limited test {}] wal.AbstractFSWAL(1183): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK], DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK]]
2024-12-13T17:18:14,889 WARN [Time-limited test {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK], DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK]]
2024-12-13T17:18:14,889 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog bcc0a2319104%2C35469%2C1734110234341:(num 1734110289878) roll requested
2024-12-13T17:18:17,722 DEBUG [master/bcc0a2319104:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 2a575812d6ea9a5ddbc77c5d1bb7a309 changed from -1.0 to 0.0, refreshing cache
2024-12-13T17:18:19,889 INFO [sync.2 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK], DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK]]
2024-12-13T17:18:19,890 WARN [sync.2 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK], DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK]]
2024-12-13T17:18:19,891 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110279061 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110289878
2024-12-13T17:18:19,891 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41043:41043),(127.0.0.1/127.0.0.1:33981:33981)]
2024-12-13T17:18:19,892 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110279061 is not closed yet, will try archiving it next time
2024-12-13T17:18:19,893 INFO [regionserver/bcc0a2319104:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C35469%2C1734110234341.1734110299892
2024-12-13T17:18:19,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741843_1019 (size=4753)
2024-12-13T17:18:19,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741843_1019 (size=4753)
2024-12-13T17:18:24,896 INFO [sync.3 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK], DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK]]
2024-12-13T17:18:24,897 WARN [sync.3 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK], DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK]]
2024-12-13T17:18:24,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35469 {}] regionserver.HRegion(8581): Flush requested on 82c8bc6c6af3232621b8c9cc7d7e8dc3
2024-12-13T17:18:24,897 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 82c8bc6c6af3232621b8c9cc7d7e8dc3 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-13T17:18:24,908 INFO [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(1183): Slow sync cost: 5009 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK], DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK]]
2024-12-13T17:18:24,908 WARN [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5009 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK], DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK]]
2024-12-13T17:18:26,898 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1])
2024-12-13T17:18:29,900 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK], DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK]]
2024-12-13T17:18:29,900 WARN [sync.4 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK], DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK]]
2024-12-13T17:18:29,909 INFO [sync.0 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK], DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK]]
2024-12-13T17:18:29,909 WARN [sync.0 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK], DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK]]
2024-12-13T17:18:29,909 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/.tmp/info/f69c53750100431fb5dd7e2606a411a8 is 1080, key is row0015/info:/1734110276653/Put/seqid=0
2024-12-13T17:18:29,910 INFO [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110289878 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110299892
2024-12-13T17:18:29,910 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33981:33981),(127.0.0.1/127.0.0.1:41043:41043)]
2024-12-13T17:18:29,911 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110289878 is not closed yet, will try archiving it next time
2024-12-13T17:18:29,911 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog bcc0a2319104%2C35469%2C1734110234341:(num 1734110299892) roll requested
2024-12-13T17:18:29,911 INFO [regionserver/bcc0a2319104:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C35469%2C1734110234341.1734110309911
2024-12-13T17:18:29,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741844_1020 (size=1569)
2024-12-13T17:18:29,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741844_1020 (size=1569)
2024-12-13T17:18:29,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741846_1022 (size=12509)
2024-12-13T17:18:29,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741846_1022 (size=12509)
2024-12-13T17:18:29,916 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/.tmp/info/f69c53750100431fb5dd7e2606a411a8
2024-12-13T17:18:29,926 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/.tmp/info/f69c53750100431fb5dd7e2606a411a8 as hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/info/f69c53750100431fb5dd7e2606a411a8
2024-12-13T17:18:29,935 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/info/f69c53750100431fb5dd7e2606a411a8, entries=7, sequenceid=31, filesize=12.2 K
2024-12-13T17:18:34,921 INFO [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(1183): Slow sync cost: 5008 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK], DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK]]
2024-12-13T17:18:34,921 WARN [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5008 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK], DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK]]
2024-12-13T17:18:34,937 INFO [sync.1 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK], DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK]]
2024-12-13T17:18:34,937 WARN [sync.1 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK], DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK]]
2024-12-13T17:18:34,937 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 82c8bc6c6af3232621b8c9cc7d7e8dc3 in 10040ms, sequenceid=31, compaction requested=true
2024-12-13T17:18:34,937 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 82c8bc6c6af3232621b8c9cc7d7e8dc3:
2024-12-13T17:18:34,937 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=36.6 K, sizeToCheck=16.0 K
2024-12-13T17:18:34,937 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-13T17:18:34,938 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/info/57d87141c6574dda8e1f14c5bfd7b6b4 because midkey is the same as first or last row
2024-12-13T17:18:34,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 82c8bc6c6af3232621b8c9cc7d7e8dc3:info, priority=-2147483648, current under compaction store size is 1
2024-12-13T17:18:34,940 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-13T17:18:34,940 DEBUG [RS:0;bcc0a2319104:35469-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-13T17:18:34,943 DEBUG [RS:0;bcc0a2319104:35469-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-13T17:18:34,945 DEBUG [RS:0;bcc0a2319104:35469-shortCompactions-0 {}] regionserver.HStore(1540): 82c8bc6c6af3232621b8c9cc7d7e8dc3/info is initiating minor compaction (all files)
2024-12-13T17:18:34,945 INFO [RS:0;bcc0a2319104:35469-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 82c8bc6c6af3232621b8c9cc7d7e8dc3/info in TestLogRolling-testSlowSyncLogRolling,,1734110237487.82c8bc6c6af3232621b8c9cc7d7e8dc3.
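The "1 in ratio" in the ExploringCompactionPolicy entry above refers to a size-ratio test over the candidate file set: no file in the selection may be much larger than the rest combined. A rough standalone approximation of that check, with an assumed ratio of 1.2; the real policy additionally searches permutations of candidate windows, which this sketch does not attempt:

    import java.util.List;

    // Illustrative ratio check in the spirit of the "in ratio" test above;
    // not the actual ExploringCompactionPolicy implementation.
    public class CompactionRatioSketch {
        static boolean filesInRatio(List<Long> fileSizes, double ratio) {
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                // A file larger than ratio * (sum of the others) disqualifies the set.
                if (size > (total - size) * ratio) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // The three flushed HFiles from this test are 12509 bytes each (37527 total).
            System.out.println(filesInRatio(List.of(12509L, 12509L, 12509L), 1.2)); // true
        }
    }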
2024-12-13T17:18:34,946 INFO [RS:0;bcc0a2319104:35469-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/info/57d87141c6574dda8e1f14c5bfd7b6b4, hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/info/9812220c5b7e4b2b916de4659eedf373, hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/info/f69c53750100431fb5dd7e2606a411a8] into tmpdir=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/.tmp, totalSize=36.6 K
2024-12-13T17:18:34,947 DEBUG [RS:0;bcc0a2319104:35469-shortCompactions-0 {}] compactions.Compactor(224): Compacting 57d87141c6574dda8e1f14c5bfd7b6b4, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1734110247533
2024-12-13T17:18:34,948 DEBUG [RS:0;bcc0a2319104:35469-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9812220c5b7e4b2b916de4659eedf373, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1734110261565
2024-12-13T17:18:34,949 DEBUG [RS:0;bcc0a2319104:35469-shortCompactions-0 {}] compactions.Compactor(224): Compacting f69c53750100431fb5dd7e2606a411a8, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1734110276653
2024-12-13T17:18:34,996 INFO [RS:0;bcc0a2319104:35469-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 82c8bc6c6af3232621b8c9cc7d7e8dc3#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-13T17:18:34,997 DEBUG [RS:0;bcc0a2319104:35469-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/.tmp/info/aa468667d4294b3e9967887ced243283 is 1080, key is row0001/info:/1734110247533/Put/seqid=0
2024-12-13T17:18:35,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741848_1024 (size=27710)
2024-12-13T17:18:35,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741848_1024 (size=27710)
2024-12-13T17:18:35,021 DEBUG [RS:0;bcc0a2319104:35469-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/.tmp/info/aa468667d4294b3e9967887ced243283 as hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/info/aa468667d4294b3e9967887ced243283
2024-12-13T17:18:39,922 INFO [sync.2 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK], DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK]]
2024-12-13T17:18:39,922 WARN [sync.2 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK], DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK]]
2024-12-13T17:18:39,923 INFO [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110299892 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110309911
2024-12-13T17:18:39,923 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41043:41043),(127.0.0.1/127.0.0.1:33981:33981)]
2024-12-13T17:18:39,923 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110299892 is not closed yet, will try archiving it next time
2024-12-13T17:18:39,923 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110267579 to hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/oldWALs/bcc0a2319104%2C35469%2C1734110234341.1734110267579
2024-12-13T17:18:39,923 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog bcc0a2319104%2C35469%2C1734110234341:(num 1734110319923) roll requested
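The PressureAwareThroughputController entry above reports that the compactor averaged 10.77 MB/s against a 50 MB/s limit, so it never had to sleep. A toy throttle in the same spirit, purely as an assumed sketch (the real controller also scales the limit with memstore/compaction pressure, which is omitted here):

    // Illustrative write throttle: the caller reports bytes as it writes and
    // sleeps whenever it is running ahead of the configured rate limit.
    public class ThroughputThrottleSketch {
        private final double limitBytesPerSec;
        private final long startNanos = System.nanoTime();
        private long bytesWritten;

        public ThroughputThrottleSketch(double limitBytesPerSec) {
            this.limitBytesPerSec = limitBytesPerSec;
        }

        public void control(long newBytes) throws InterruptedException {
            bytesWritten += newBytes;
            double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
            double earliestAllowedSec = bytesWritten / limitBytesPerSec;
            if (earliestAllowedSec > elapsedSec) {
                // Running faster than the limit: sleep off the difference.
                Thread.sleep((long) ((earliestAllowedSec - elapsedSec) * 1000));
            }
        }
    }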
2024-12-13T17:18:39,923 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C35469%2C1734110234341.1734110319923
2024-12-13T17:18:39,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741845_1021 (size=438)
2024-12-13T17:18:39,925 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110279061 to hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/oldWALs/bcc0a2319104%2C35469%2C1734110234341.1734110279061
2024-12-13T17:18:39,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741845_1021 (size=438)
2024-12-13T17:18:39,927 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110289878 to hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/oldWALs/bcc0a2319104%2C35469%2C1734110234341.1734110289878
2024-12-13T17:18:39,928 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110299892 to hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/oldWALs/bcc0a2319104%2C35469%2C1734110234341.1734110299892
2024-12-13T17:18:42,769 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-13T17:18:44,925 INFO [sync.3 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5002 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK], DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK]]
2024-12-13T17:18:44,926 WARN [sync.3 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK], DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK]]
2024-12-13T17:18:44,929 INFO [RS:0;bcc0a2319104:35469-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 82c8bc6c6af3232621b8c9cc7d7e8dc3/info of 82c8bc6c6af3232621b8c9cc7d7e8dc3 into aa468667d4294b3e9967887ced243283(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 9sec to execute.
2024-12-13T17:18:44,930 DEBUG [RS:0;bcc0a2319104:35469-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 82c8bc6c6af3232621b8c9cc7d7e8dc3:
2024-12-13T17:18:44,930 INFO [RS:0;bcc0a2319104:35469-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1734110237487.82c8bc6c6af3232621b8c9cc7d7e8dc3., storeName=82c8bc6c6af3232621b8c9cc7d7e8dc3/info, priority=13, startTime=1734110314939; duration=9sec
2024-12-13T17:18:44,931 DEBUG [RS:0;bcc0a2319104:35469-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=27.1 K, sizeToCheck=16.0 K
2024-12-13T17:18:44,931 DEBUG [RS:0;bcc0a2319104:35469-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-13T17:18:44,931 DEBUG [RS:0;bcc0a2319104:35469-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/info/aa468667d4294b3e9967887ced243283 because midkey is the same as first or last row
2024-12-13T17:18:44,931 DEBUG [RS:0;bcc0a2319104:35469-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-13T17:18:44,932 DEBUG [RS:0;bcc0a2319104:35469-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 82c8bc6c6af3232621b8c9cc7d7e8dc3:info
2024-12-13T17:18:44,932 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK], DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK]]
2024-12-13T17:18:44,933 WARN [sync.4 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46779,DS-8eb1c0cd-e20f-43e9-a2a3-6ce779385c35,DISK], DatanodeInfoWithStorage[127.0.0.1:44417,DS-9cfb61d3-4c62-4b74-a29e-ae78c6509894,DISK]]
2024-12-13T17:18:44,934 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110309911 with entries=1, filesize=531 B; new WAL /user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110319923
2024-12-13T17:18:44,934 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33981:33981),(127.0.0.1/127.0.0.1:41043:41043)]
2024-12-13T17:18:44,934 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110309911 is not closed yet, will try archiving it next time
2024-12-13T17:18:44,935 INFO [regionserver/bcc0a2319104:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C35469%2C1734110234341.1734110324934
2024-12-13T17:18:44,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741847_1023 (size=539)
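The split decision above is two-staged: the size policy says "Should split" once the store exceeds sizeToCheck, but StoreUtils vetoes the split when the candidate split point (the largest file's midkey) equals the file's first or last row, since splitting there would create an empty daughter region. A minimal sketch of that pair of checks, with illustrative names rather than HBase's API:

    import java.util.Arrays;

    // Illustrative two-stage split check mirroring the log entries above.
    public class SplitCheckSketch {
        static boolean shouldSplit(long storeSizeBytes, long sizeToCheckBytes) {
            return storeSizeBytes > sizeToCheckBytes;   // e.g. 27.1 K > 16.0 K above
        }

        static boolean canSplitAt(byte[] midkey, byte[] firstRow, byte[] lastRow) {
            // "cannot split ... because midkey is the same as first or last row"
            return !Arrays.equals(midkey, firstRow) && !Arrays.equals(midkey, lastRow);
        }
    }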
2024-12-13T17:18:44,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741847_1023 (size=539)
2024-12-13T17:18:44,938 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110309911 to hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/oldWALs/bcc0a2319104%2C35469%2C1734110234341.1734110309911
2024-12-13T17:18:44,943 INFO [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110319923 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110324934
2024-12-13T17:18:44,943 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41043:41043),(127.0.0.1/127.0.0.1:33981:33981)]
2024-12-13T17:18:44,943 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110319923 is not closed yet, will try archiving it next time
2024-12-13T17:18:44,943 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog bcc0a2319104%2C35469%2C1734110234341:(num 1734110324934) roll requested
2024-12-13T17:18:44,944 INFO [regionserver/bcc0a2319104:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C35469%2C1734110234341.1734110324943
2024-12-13T17:18:44,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741849_1025 (size=1258)
2024-12-13T17:18:44,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741849_1025 (size=1258)
2024-12-13T17:18:44,951 INFO [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110324934 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110324943
2024-12-13T17:18:44,951 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41043:41043),(127.0.0.1/127.0.0.1:33981:33981)]
2024-12-13T17:18:44,951 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110324934 is not closed yet, will try archiving it next time
2024-12-13T17:18:44,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741850_1026 (size=93)
2024-12-13T17:18:44,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741850_1026 (size=93)
2024-12-13T17:18:44,954 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341/bcc0a2319104%2C35469%2C1734110234341.1734110324934 to hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/oldWALs/bcc0a2319104%2C35469%2C1734110234341.1734110324934
2024-12-13T17:18:47,946 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 82c8bc6c6af3232621b8c9cc7d7e8dc3, had cached 0 bytes from a total of 27710
2024-12-13T17:18:56,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35469 {}] regionserver.HRegion(8581): Flush requested on 82c8bc6c6af3232621b8c9cc7d7e8dc3
2024-12-13T17:18:56,961 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 82c8bc6c6af3232621b8c9cc7d7e8dc3 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-13T17:18:56,967 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/.tmp/info/899546b1293f4e7898e45ca2e1eb1c6e is 1080, key is row0022/info:/1734110324936/Put/seqid=0
2024-12-13T17:18:56,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741852_1028 (size=12509)
2024-12-13T17:18:56,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741852_1028 (size=12509)
2024-12-13T17:18:56,976 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/.tmp/info/899546b1293f4e7898e45ca2e1eb1c6e
2024-12-13T17:18:56,985 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/.tmp/info/899546b1293f4e7898e45ca2e1eb1c6e as hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/info/899546b1293f4e7898e45ca2e1eb1c6e
2024-12-13T17:18:56,993 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/info/899546b1293f4e7898e45ca2e1eb1c6e, entries=7, sequenceid=42, filesize=12.2 K
2024-12-13T17:18:56,994 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 82c8bc6c6af3232621b8c9cc7d7e8dc3 in 34ms, sequenceid=42, compaction requested=false
2024-12-13T17:18:56,994 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 82c8bc6c6af3232621b8c9cc7d7e8dc3:
2024-12-13T17:18:56,994 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=39.3 K, sizeToCheck=16.0 K
2024-12-13T17:18:56,994 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-13T17:18:56,994 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/info/aa468667d4294b3e9967887ced243283 because midkey is the same as first or last row
2024-12-13T17:19:04,970 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster
2024-12-13T17:19:04,970 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-13T17:19:04,970 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5c22e283 to 127.0.0.1:62632
2024-12-13T17:19:04,971 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-13T17:19:04,971 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-12-13T17:19:04,972 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=791946289, stopped=false
2024-12-13T17:19:04,972 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=bcc0a2319104,40387,1734110233611
2024-12-13T17:19:05,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35469-0x10020655d730001, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-13T17:19:05,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-13T17:19:05,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35469-0x10020655d730001, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:19:05,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:19:05,018 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping
2024-12-13T17:19:05,018 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-13T17:19:05,019 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'bcc0a2319104,35469,1734110234341' *****
2024-12-13T17:19:05,019 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested
2024-12-13T17:19:05,019 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35469-0x10020655d730001, quorum=127.0.0.1:62632, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-13T17:19:05,020 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-13T17:19:05,021 INFO [RS:0;bcc0a2319104:35469 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-13T17:19:05,021 INFO [RS:0;bcc0a2319104:35469 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
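The shutdown above is signalled through ZooKeeper: deleting the /hbase/running znode fires a NodeDeleted watch on the master and every region server, each of which then stops accepting work and begins closing its online regions. A hypothetical shape of that signal handling, for illustration only (not the actual ZKWatcher/HRegionServer code):

    // Illustrative shutdown-signal sketch mirroring the ZKWatcher entries above.
    public class ShutdownSketch {
        private volatile boolean stopped;

        // Called for each ZooKeeper event the watcher receives.
        void onZkEvent(String eventType, String path) {
            if ("NodeDeleted".equals(eventType) && "/hbase/running".equals(path)) {
                stopped = true; // cluster shutdown requested
            }
        }

        // The server's main loop checks the flag and closes regions in order;
        // each close task flushes remaining memstore edits before finishing.
        void runCloseLoop(Iterable<Runnable> regionCloseTasks) {
            if (!stopped) return;
            for (Runnable closeTask : regionCloseTasks) {
                closeTask.run();
            }
        }
    }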
2024-12-13T17:19:05,021 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting
2024-12-13T17:19:05,021 INFO [RS:0;bcc0a2319104:35469 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-13T17:19:05,022 INFO [RS:0;bcc0a2319104:35469 {}] regionserver.HRegionServer(3579): Received CLOSE for 82c8bc6c6af3232621b8c9cc7d7e8dc3
2024-12-13T17:19:05,022 INFO [RS:0;bcc0a2319104:35469 {}] regionserver.HRegionServer(3579): Received CLOSE for 2a575812d6ea9a5ddbc77c5d1bb7a309
2024-12-13T17:19:05,023 INFO [RS:0;bcc0a2319104:35469 {}] regionserver.HRegionServer(1224): stopping server bcc0a2319104,35469,1734110234341
2024-12-13T17:19:05,023 DEBUG [RS:0;bcc0a2319104:35469 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-13T17:19:05,023 INFO [RS:0;bcc0a2319104:35469 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-13T17:19:05,023 INFO [RS:0;bcc0a2319104:35469 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-13T17:19:05,023 INFO [RS:0;bcc0a2319104:35469 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-13T17:19:05,023 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 82c8bc6c6af3232621b8c9cc7d7e8dc3, disabling compactions & flushes
2024-12-13T17:19:05,023 INFO [RS:0;bcc0a2319104:35469 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740
2024-12-13T17:19:05,023 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testSlowSyncLogRolling,,1734110237487.82c8bc6c6af3232621b8c9cc7d7e8dc3.
2024-12-13T17:19:05,023 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testSlowSyncLogRolling,,1734110237487.82c8bc6c6af3232621b8c9cc7d7e8dc3.
2024-12-13T17:19:05,023 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1734110237487.82c8bc6c6af3232621b8c9cc7d7e8dc3. after waiting 0 ms
2024-12-13T17:19:05,024 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1734110237487.82c8bc6c6af3232621b8c9cc7d7e8dc3.
2024-12-13T17:19:05,024 INFO [RS:0;bcc0a2319104:35469 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close
2024-12-13T17:19:05,024 DEBUG [RS:0;bcc0a2319104:35469 {}] regionserver.HRegionServer(1603): Online Regions={82c8bc6c6af3232621b8c9cc7d7e8dc3=TestLogRolling-testSlowSyncLogRolling,,1734110237487.82c8bc6c6af3232621b8c9cc7d7e8dc3., 2a575812d6ea9a5ddbc77c5d1bb7a309=hbase:namespace,,1734110236419.2a575812d6ea9a5ddbc77c5d1bb7a309., 1588230740=hbase:meta,,1.1588230740}
2024-12-13T17:19:05,024 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-13T17:19:05,024 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 82c8bc6c6af3232621b8c9cc7d7e8dc3 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB
2024-12-13T17:19:05,024 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-13T17:19:05,024 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-13T17:19:05,024 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-13T17:19:05,024 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-13T17:19:05,025 DEBUG [RS:0;bcc0a2319104:35469 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 2a575812d6ea9a5ddbc77c5d1bb7a309, 82c8bc6c6af3232621b8c9cc7d7e8dc3
2024-12-13T17:19:05,025 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.81 KB heapSize=5.32 KB
2024-12-13T17:19:05,030 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/.tmp/info/609569330b8141de901f720b7414d8ef is 1080, key is row0029/info:/1734110338962/Put/seqid=0
2024-12-13T17:19:05,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741853_1029 (size=8193)
2024-12-13T17:19:05,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741853_1029 (size=8193)
2024-12-13T17:19:05,037 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/.tmp/info/609569330b8141de901f720b7414d8ef
2024-12-13T17:19:05,046 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/hbase/meta/1588230740/.tmp/info/d79a3282cd9949a5b46036d7a7468c6f is 195, key is TestLogRolling-testSlowSyncLogRolling,,1734110237487.82c8bc6c6af3232621b8c9cc7d7e8dc3./info:regioninfo/1734110237972/Put/seqid=0
2024-12-13T17:19:05,046 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/.tmp/info/609569330b8141de901f720b7414d8ef as hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/info/609569330b8141de901f720b7414d8ef
2024-12-13T17:19:05,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741854_1030 (size=8172)
2024-12-13T17:19:05,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741854_1030 (size=8172)
2024-12-13T17:19:05,052 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.59 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/hbase/meta/1588230740/.tmp/info/d79a3282cd9949a5b46036d7a7468c6f
2024-12-13T17:19:05,057 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/info/609569330b8141de901f720b7414d8ef, entries=3, sequenceid=48, filesize=8.0 K
2024-12-13T17:19:05,058 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 82c8bc6c6af3232621b8c9cc7d7e8dc3 in 34ms, sequenceid=48, compaction requested=true
2024-12-13T17:19:05,059 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1734110237487.82c8bc6c6af3232621b8c9cc7d7e8dc3.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/info/57d87141c6574dda8e1f14c5bfd7b6b4, hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/info/9812220c5b7e4b2b916de4659eedf373, hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/info/f69c53750100431fb5dd7e2606a411a8] to archive
2024-12-13T17:19:05,063 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1734110237487.82c8bc6c6af3232621b8c9cc7d7e8dc3.-1 {}] backup.HFileArchiver(363): Archiving compacted files.
2024-12-13T17:19:05,069 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/info/f69c53750100431fb5dd7e2606a411a8 to hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/archive/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/info/f69c53750100431fb5dd7e2606a411a8
2024-12-13T17:19:05,070 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/info/9812220c5b7e4b2b916de4659eedf373 to hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/archive/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/info/9812220c5b7e4b2b916de4659eedf373
2024-12-13T17:19:05,070 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/info/57d87141c6574dda8e1f14c5bfd7b6b4 to hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/archive/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/info/57d87141c6574dda8e1f14c5bfd7b6b4
2024-12-13T17:19:05,078 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/hbase/meta/1588230740/.tmp/table/52bfca94286d4e5dace3a8140a2e618b is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1734110237986/Put/seqid=0
2024-12-13T17:19:05,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741855_1031 (size=5452)
2024-12-13T17:19:05,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741855_1031 (size=5452)
2024-12-13T17:19:05,087 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=232 B at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/hbase/meta/1588230740/.tmp/table/52bfca94286d4e5dace3a8140a2e618b
2024-12-13T17:19:05,091 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/default/TestLogRolling-testSlowSyncLogRolling/82c8bc6c6af3232621b8c9cc7d7e8dc3/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1
2024-12-13T17:19:05,093 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testSlowSyncLogRolling,,1734110237487.82c8bc6c6af3232621b8c9cc7d7e8dc3.
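The HFileArchiver entries above move compacted-away store files into a parallel archive/ tree instead of deleting them outright, preserving the data/ directory layout under the archive root (snapshots or offline tooling may still reference the old files). A simplified local-filesystem analogue of that move, with made-up method names:

    import java.io.IOException;
    import java.nio.file.*;

    // Illustrative sketch of archiving a store file: the path relative to the
    // data root is preserved under the archive root, then the file is moved.
    public class ArchiveSketch {
        static Path archive(Path storeFile, Path dataRoot, Path archiveRoot) throws IOException {
            // e.g. default/TestLogRolling-.../82c8bc.../info/<hfile>
            Path relative = dataRoot.relativize(storeFile);
            Path target = archiveRoot.resolve(relative);
            Files.createDirectories(target.getParent());
            return Files.move(storeFile, target, StandardCopyOption.REPLACE_EXISTING);
        }
    }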
2024-12-13T17:19:05,094 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 82c8bc6c6af3232621b8c9cc7d7e8dc3:
2024-12-13T17:19:05,094 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1734110237487.82c8bc6c6af3232621b8c9cc7d7e8dc3.
2024-12-13T17:19:05,094 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 2a575812d6ea9a5ddbc77c5d1bb7a309, disabling compactions & flushes
2024-12-13T17:19:05,094 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734110236419.2a575812d6ea9a5ddbc77c5d1bb7a309.
2024-12-13T17:19:05,094 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734110236419.2a575812d6ea9a5ddbc77c5d1bb7a309.
2024-12-13T17:19:05,094 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734110236419.2a575812d6ea9a5ddbc77c5d1bb7a309. after waiting 0 ms
2024-12-13T17:19:05,094 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734110236419.2a575812d6ea9a5ddbc77c5d1bb7a309.
2024-12-13T17:19:05,094 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 2a575812d6ea9a5ddbc77c5d1bb7a309 1/1 column families, dataSize=78 B heapSize=488 B
2024-12-13T17:19:05,097 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/hbase/meta/1588230740/.tmp/info/d79a3282cd9949a5b46036d7a7468c6f as hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/hbase/meta/1588230740/info/d79a3282cd9949a5b46036d7a7468c6f
2024-12-13T17:19:05,105 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/hbase/meta/1588230740/info/d79a3282cd9949a5b46036d7a7468c6f, entries=20, sequenceid=14, filesize=8.0 K
2024-12-13T17:19:05,107 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/hbase/meta/1588230740/.tmp/table/52bfca94286d4e5dace3a8140a2e618b as hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/hbase/meta/1588230740/table/52bfca94286d4e5dace3a8140a2e618b
2024-12-13T17:19:05,113 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/hbase/namespace/2a575812d6ea9a5ddbc77c5d1bb7a309/.tmp/info/f89d845bfca14b2d9d76efcb6812cd28 is 45, key is default/info:d/1734110236992/Put/seqid=0
2024-12-13T17:19:05,116 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/hbase/meta/1588230740/table/52bfca94286d4e5dace3a8140a2e618b, entries=4, sequenceid=14, filesize=5.3 K
2024-12-13T17:19:05,118 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~2.81 KB/2882, heapSize ~5.04 KB/5160, currentSize=0 B/0 for 1588230740 in 93ms, sequenceid=14, compaction requested=false
2024-12-13T17:19:05,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741856_1032 (size=5037)
2024-12-13T17:19:05,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741856_1032 (size=5037)
2024-12-13T17:19:05,122 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/hbase/namespace/2a575812d6ea9a5ddbc77c5d1bb7a309/.tmp/info/f89d845bfca14b2d9d76efcb6812cd28
2024-12-13T17:19:05,125 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/hbase/meta/1588230740/recovered.edits/17.seqid, newMaxSeqId=17, maxSeqId=1
2024-12-13T17:19:05,125 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-13T17:19:05,126 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-13T17:19:05,126 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-13T17:19:05,126 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-13T17:19:05,131 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/hbase/namespace/2a575812d6ea9a5ddbc77c5d1bb7a309/.tmp/info/f89d845bfca14b2d9d76efcb6812cd28 as hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/hbase/namespace/2a575812d6ea9a5ddbc77c5d1bb7a309/info/f89d845bfca14b2d9d76efcb6812cd28
2024-12-13T17:19:05,139 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/hbase/namespace/2a575812d6ea9a5ddbc77c5d1bb7a309/info/f89d845bfca14b2d9d76efcb6812cd28, entries=2, sequenceid=6, filesize=4.9 K
2024-12-13T17:19:05,140 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 2a575812d6ea9a5ddbc77c5d1bb7a309 in 46ms, sequenceid=6, compaction requested=false
2024-12-13T17:19:05,146 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/data/hbase/namespace/2a575812d6ea9a5ddbc77c5d1bb7a309/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-13T17:19:05,147 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1734110236419.2a575812d6ea9a5ddbc77c5d1bb7a309.
2024-12-13T17:19:05,147 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 2a575812d6ea9a5ddbc77c5d1bb7a309:
2024-12-13T17:19:05,147 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1734110236419.2a575812d6ea9a5ddbc77c5d1bb7a309.
2024-12-13T17:19:05,225 INFO [RS:0;bcc0a2319104:35469 {}] regionserver.HRegionServer(1250): stopping server bcc0a2319104,35469,1734110234341; all regions closed.
2024-12-13T17:19:05,226 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341
2024-12-13T17:19:05,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741834_1010 (size=4330)
2024-12-13T17:19:05,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741834_1010 (size=4330)
2024-12-13T17:19:05,231 DEBUG [RS:0;bcc0a2319104:35469 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/oldWALs
2024-12-13T17:19:05,231 INFO [RS:0;bcc0a2319104:35469 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog bcc0a2319104%2C35469%2C1734110234341.meta:.meta(num 1734110236106)
2024-12-13T17:19:05,232 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/WALs/bcc0a2319104,35469,1734110234341
2024-12-13T17:19:05,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741851_1027 (size=13066)
2024-12-13T17:19:05,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741851_1027 (size=13066)
2024-12-13T17:19:05,239 DEBUG [RS:0;bcc0a2319104:35469 {}] wal.AbstractFSWAL(1071): Moved 3 WAL file(s) to /user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/oldWALs
2024-12-13T17:19:05,239 INFO [RS:0;bcc0a2319104:35469 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog bcc0a2319104%2C35469%2C1734110234341:(num 1734110324943)
2024-12-13T17:19:05,239 DEBUG [RS:0;bcc0a2319104:35469 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-13T17:19:05,239 INFO [RS:0;bcc0a2319104:35469 {}] regionserver.LeaseManager(133): Closed leases
2024-12-13T17:19:05,240 INFO [RS:0;bcc0a2319104:35469 {}] hbase.ChoreService(370): Chore service for: regionserver/bcc0a2319104:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown
2024-12-13T17:19:05,240 INFO [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-13T17:19:05,240 INFO [RS:0;bcc0a2319104:35469 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:35469
2024-12-13T17:19:05,256 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35469-0x10020655d730001, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/bcc0a2319104,35469,1734110234341
2024-12-13T17:19:05,256 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-13T17:19:05,267 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [bcc0a2319104,35469,1734110234341]
2024-12-13T17:19:05,267 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing bcc0a2319104,35469,1734110234341; numProcessing=1
2024-12-13T17:19:05,275 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/bcc0a2319104,35469,1734110234341 already deleted, retry=false
2024-12-13T17:19:05,275 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; bcc0a2319104,35469,1734110234341 expired; onlineServers=0
2024-12-13T17:19:05,275 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'bcc0a2319104,40387,1734110233611' *****
2024-12-13T17:19:05,275 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-13T17:19:05,275 DEBUG [M:0;bcc0a2319104:40387 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2788a8c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bcc0a2319104/172.17.0.2:0
2024-12-13T17:19:05,276 INFO [M:0;bcc0a2319104:40387 {}] regionserver.HRegionServer(1224): stopping server bcc0a2319104,40387,1734110233611
2024-12-13T17:19:05,276 INFO [M:0;bcc0a2319104:40387 {}] regionserver.HRegionServer(1250): stopping server bcc0a2319104,40387,1734110233611; all regions closed.
2024-12-13T17:19:05,276 DEBUG [M:0;bcc0a2319104:40387 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-13T17:19:05,276 DEBUG [M:0;bcc0a2319104:40387 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-13T17:19:05,276 DEBUG [M:0;bcc0a2319104:40387 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-13T17:19:05,276 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-13T17:19:05,276 DEBUG [master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.large.0-1734110235362 {}] cleaner.HFileCleaner(306): Exit Thread[master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.large.0-1734110235362,5,FailOnTimeoutGroup]
2024-12-13T17:19:05,276 DEBUG [master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.small.0-1734110235363 {}] cleaner.HFileCleaner(306): Exit Thread[master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.small.0-1734110235363,5,FailOnTimeoutGroup]
2024-12-13T17:19:05,276 INFO [M:0;bcc0a2319104:40387 {}] hbase.ChoreService(370): Chore service for: master/bcc0a2319104:0 had [] on shutdown
2024-12-13T17:19:05,277 DEBUG [M:0;bcc0a2319104:40387 {}] master.HMaster(1733): Stopping service threads
2024-12-13T17:19:05,277 INFO [M:0;bcc0a2319104:40387 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-13T17:19:05,277 INFO [M:0;bcc0a2319104:40387 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-13T17:19:05,277 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-13T17:19:05,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-13T17:19:05,283 DEBUG [M:0;bcc0a2319104:40387 {}] zookeeper.ZKUtil(347): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-13T17:19:05,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:19:05,284 WARN [M:0;bcc0a2319104:40387 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-13T17:19:05,284 INFO [M:0;bcc0a2319104:40387 {}] assignment.AssignmentManager(391): Stopping assignment manager
2024-12-13T17:19:05,284 INFO [M:0;bcc0a2319104:40387 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-13T17:19:05,284 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-13T17:19:05,284 DEBUG [M:0;bcc0a2319104:40387 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-13T17:19:05,284 INFO [M:0;bcc0a2319104:40387 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-13T17:19:05,284 DEBUG [M:0;bcc0a2319104:40387 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-13T17:19:05,284 DEBUG [M:0;bcc0a2319104:40387 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-13T17:19:05,284 DEBUG [M:0;bcc0a2319104:40387 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-13T17:19:05,285 INFO [M:0;bcc0a2319104:40387 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=40.22 KB heapSize=50.15 KB
2024-12-13T17:19:05,308 DEBUG [M:0;bcc0a2319104:40387 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5c27471771c048a7a1d4ce32a76b47cd is 82, key is hbase:meta,,1/info:regioninfo/1734110236219/Put/seqid=0
2024-12-13T17:19:05,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741857_1033 (size=5672)
2024-12-13T17:19:05,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741857_1033 (size=5672)
2024-12-13T17:19:05,315 INFO [M:0;bcc0a2319104:40387 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5c27471771c048a7a1d4ce32a76b47cd
2024-12-13T17:19:05,336 DEBUG [M:0;bcc0a2319104:40387 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b75155fcaab449da893d4c1598222864 is 767, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1734110238035/Put/seqid=0
2024-12-13T17:19:05,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741858_1034 (size=6427)
2024-12-13T17:19:05,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741858_1034 (size=6427)
2024-12-13T17:19:05,342 INFO [M:0;bcc0a2319104:40387 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=39.62 KB at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b75155fcaab449da893d4c1598222864
2024-12-13T17:19:05,348 INFO [M:0;bcc0a2319104:40387 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for b75155fcaab449da893d4c1598222864
2024-12-13T17:19:05,362 DEBUG [M:0;bcc0a2319104:40387 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ddfc0ca08f4b41879a173e4287de028d is 69, key is bcc0a2319104,35469,1734110234341/rs:state/1734110235474/Put/seqid=0
2024-12-13T17:19:05,367 INFO [RS:0;bcc0a2319104:35469 {}] regionserver.HRegionServer(1307): Exiting; stopping=bcc0a2319104,35469,1734110234341; zookeeper connection closed.
2024-12-13T17:19:05,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35469-0x10020655d730001, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-13T17:19:05,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35469-0x10020655d730001, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-13T17:19:05,367 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@20c9f881 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@20c9f881
2024-12-13T17:19:05,368 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-13T17:19:05,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741859_1035 (size=5156)
2024-12-13T17:19:05,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741859_1035 (size=5156)
2024-12-13T17:19:05,369 INFO [M:0;bcc0a2319104:40387 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ddfc0ca08f4b41879a173e4287de028d
2024-12-13T17:19:05,393 DEBUG [M:0;bcc0a2319104:40387 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/952ff38088f14ea5a2eed6221ee69dab is 52, key is load_balancer_on/state:d/1734110237468/Put/seqid=0
2024-12-13T17:19:05,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741860_1036 (size=5056)
2024-12-13T17:19:05,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741860_1036 (size=5056)
2024-12-13T17:19:05,400 INFO [M:0;bcc0a2319104:40387 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/952ff38088f14ea5a2eed6221ee69dab
2024-12-13T17:19:05,406 DEBUG [M:0;bcc0a2319104:40387 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5c27471771c048a7a1d4ce32a76b47cd as hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5c27471771c048a7a1d4ce32a76b47cd
2024-12-13T17:19:05,412 INFO [M:0;bcc0a2319104:40387 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5c27471771c048a7a1d4ce32a76b47cd, entries=8, sequenceid=104, filesize=5.5 K
2024-12-13T17:19:05,414 DEBUG [M:0;bcc0a2319104:40387 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b75155fcaab449da893d4c1598222864 as hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b75155fcaab449da893d4c1598222864
2024-12-13T17:19:05,420 INFO [M:0;bcc0a2319104:40387 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for b75155fcaab449da893d4c1598222864
2024-12-13T17:19:05,421 INFO [M:0;bcc0a2319104:40387 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b75155fcaab449da893d4c1598222864, entries=11, sequenceid=104, filesize=6.3 K
2024-12-13T17:19:05,422 DEBUG [M:0;bcc0a2319104:40387 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ddfc0ca08f4b41879a173e4287de028d as hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ddfc0ca08f4b41879a173e4287de028d
2024-12-13T17:19:05,428 INFO [M:0;bcc0a2319104:40387 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ddfc0ca08f4b41879a173e4287de028d, entries=1, sequenceid=104, filesize=5.0 K
2024-12-13T17:19:05,429 DEBUG [M:0;bcc0a2319104:40387 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/952ff38088f14ea5a2eed6221ee69dab as hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/952ff38088f14ea5a2eed6221ee69dab
2024-12-13T17:19:05,437 INFO [M:0;bcc0a2319104:40387 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/952ff38088f14ea5a2eed6221ee69dab, entries=1, sequenceid=104, filesize=4.9 K
2024-12-13T17:19:05,438 INFO [M:0;bcc0a2319104:40387 {}] regionserver.HRegion(3040): Finished flush of dataSize ~40.22 KB/41185, heapSize ~50.09 KB/51288, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 154ms, sequenceid=104, compaction requested=false
2024-12-13T17:19:05,439 INFO [M:0;bcc0a2319104:40387 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-13T17:19:05,439 DEBUG [M:0;bcc0a2319104:40387 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-13T17:19:05,440 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/MasterData/WALs/bcc0a2319104,40387,1734110233611
2024-12-13T17:19:05,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44417 is added to blk_1073741830_1006 (size=48486)
2024-12-13T17:19:05,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46779 is added to blk_1073741830_1006 (size=48486)
2024-12-13T17:19:05,442 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-13T17:19:05,442 INFO [M:0;bcc0a2319104:40387 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-12-13T17:19:05,442 INFO [M:0;bcc0a2319104:40387 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:40387
2024-12-13T17:19:05,450 DEBUG [M:0;bcc0a2319104:40387 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/bcc0a2319104,40387,1734110233611 already deleted, retry=false
2024-12-13T17:19:05,545 INFO [regionserver/bcc0a2319104:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-13T17:19:05,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-13T17:19:05,559 INFO [M:0;bcc0a2319104:40387 {}] regionserver.HRegionServer(1307): Exiting; stopping=bcc0a2319104,40387,1734110233611; zookeeper connection closed.
2024-12-13T17:19:05,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40387-0x10020655d730000, quorum=127.0.0.1:62632, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-13T17:19:05,568 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7c2b96d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-13T17:19:05,573 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6f8e335e{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-13T17:19:05,573 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-13T17:19:05,573 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fa9ffed{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-13T17:19:05,574 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56aaad8c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/hadoop.log.dir/,STOPPED}
2024-12-13T17:19:05,577 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-13T17:19:05,577 WARN [BP-1491248910-172.17.0.2-1734110229401 heartbeating to localhost/127.0.0.1:32953 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-13T17:19:05,577 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-13T17:19:05,577 WARN [BP-1491248910-172.17.0.2-1734110229401 heartbeating to localhost/127.0.0.1:32953 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1491248910-172.17.0.2-1734110229401 (Datanode Uuid 83a6348b-0e3a-4fac-b80b-9445d1fd54a6) service to localhost/127.0.0.1:32953
2024-12-13T17:19:05,578 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/cluster_9426d811-9f1a-7451-bc03-957b286c8a52/dfs/data/data3/current/BP-1491248910-172.17.0.2-1734110229401 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T17:19:05,579 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/cluster_9426d811-9f1a-7451-bc03-957b286c8a52/dfs/data/data4/current/BP-1491248910-172.17.0.2-1734110229401 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T17:19:05,579 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-13T17:19:05,584 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@14caea3b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-13T17:19:05,584 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@32964f26{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-13T17:19:05,584 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-13T17:19:05,584 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@76bce909{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-13T17:19:05,585 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47fbae2c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/hadoop.log.dir/,STOPPED}
2024-12-13T17:19:05,586 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-13T17:19:05,586 WARN [BP-1491248910-172.17.0.2-1734110229401 heartbeating to localhost/127.0.0.1:32953 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-13T17:19:05,586 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-13T17:19:05,587 WARN [BP-1491248910-172.17.0.2-1734110229401 heartbeating to localhost/127.0.0.1:32953 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1491248910-172.17.0.2-1734110229401 (Datanode Uuid d5bb1e9b-6d6e-440b-a7df-6cc2fd82f76f) service to localhost/127.0.0.1:32953
2024-12-13T17:19:05,587 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/cluster_9426d811-9f1a-7451-bc03-957b286c8a52/dfs/data/data1/current/BP-1491248910-172.17.0.2-1734110229401 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T17:19:05,588 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/cluster_9426d811-9f1a-7451-bc03-957b286c8a52/dfs/data/data2/current/BP-1491248910-172.17.0.2-1734110229401 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T17:19:05,588 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-13T17:19:05,599 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@57f73325{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-13T17:19:05,599 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@217e2a7b{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-13T17:19:05,599 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-13T17:19:05,600 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75fa8fc8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-13T17:19:05,600 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@22984d68{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/hadoop.log.dir/,STOPPED}
2024-12-13T17:19:05,608 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-12-13T17:19:05,641 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
2024-12-13T17:19:05,647 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=65 (was 12)

Potentially hanging thread: region-location-0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HBase-Metrics2-1
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LeaseRenewer:jenkins@localhost:32953
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-4-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: master/bcc0a2319104:0:becomeActiveMaster-MemStoreChunkPool Statistics
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RS-EventLoopGroup-3-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: SSL Certificates Store Monitor
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.Object.wait(Object.java:338)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)

Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-3-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: ForkJoinPool-2-worker-1
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32953
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-5-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (1140069844) connection to localhost/127.0.0.1:32953 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: IPC Client (1140069844) connection to localhost/127.0.0.1:32953 from jenkins.hfs.0
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: SnapshotHandlerChoreCleaner
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: Monitor thread for TaskMonitor
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: Timer for 'HBase' metrics system
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)

Potentially hanging thread: ForkJoinPool-2-worker-4
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)

Potentially hanging thread: RS-EventLoopGroup-1-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (1140069844) connection to localhost/127.0.0.1:32953 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: regionserver/bcc0a2319104:0.procedureResultReporter
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)

Potentially hanging thread: Async-Client-Retry-Timer-pool-0
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HFileArchiver-1
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-4-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: master/bcc0a2319104:0:becomeActiveMaster-MemStoreChunkPool Statistics
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: Time-limited test.named-queue-events-pool-0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47)
    app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56)
    app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159)
    app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RS-EventLoopGroup-3-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-2-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32953
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-2-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32953 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32953 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:32953 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: region-location-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@6a3564d2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=404 (was 286) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=219 (was 148) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4366 (was 5577) 2024-12-13T17:19:05,653 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=66, OpenFileDescriptor=404, MaxFileDescriptor=1048576, SystemLoadAverage=219, ProcessCount=11, AvailableMemoryMB=4366 2024-12-13T17:19:05,654 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-13T17:19:05,654 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/hadoop.log.dir so I do NOT create it in target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751 2024-12-13T17:19:05,654 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c1e1de6a-4b43-328c-b662-9e4093812075/hadoop.tmp.dir so I do NOT create it in target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751 2024-12-13T17:19:05,654 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240, deleteOnExit=true 2024-12-13T17:19:05,654 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-13T17:19:05,654 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/test.cache.data in system properties and HBase conf 2024-12-13T17:19:05,654 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/hadoop.tmp.dir in system properties and HBase conf 2024-12-13T17:19:05,654 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/hadoop.log.dir in system 
properties and HBase conf 2024-12-13T17:19:05,654 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-13T17:19:05,655 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-13T17:19:05,655 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-13T17:19:05,655 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-13T17:19:05,655 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-13T17:19:05,655 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-13T17:19:05,655 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-13T17:19:05,655 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-13T17:19:05,655 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-13T17:19:05,655 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-13T17:19:05,655 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-13T17:19:05,655 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-13T17:19:05,656 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-13T17:19:05,656 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/nfs.dump.dir in system properties and HBase conf 2024-12-13T17:19:05,656 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/java.io.tmpdir in system properties and HBase conf 2024-12-13T17:19:05,656 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-13T17:19:05,656 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-13T17:19:05,656 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-13T17:19:05,667 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-13T17:19:05,982 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T17:19:05,987 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-13T17:19:05,988 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-13T17:19:05,988 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-13T17:19:05,988 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-13T17:19:05,989 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T17:19:05,989 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3a9f818b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/hadoop.log.dir/,AVAILABLE} 2024-12-13T17:19:05,990 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d3cdd43{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-13T17:19:06,081 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@410de187{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/java.io.tmpdir/jetty-localhost-36767-hadoop-hdfs-3_4_1-tests_jar-_-any-11243272257360120402/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-13T17:19:06,081 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3a289a6d{HTTP/1.1, (http/1.1)}{localhost:36767} 2024-12-13T17:19:06,081 INFO [Time-limited test {}] server.Server(415): Started @118809ms 2024-12-13T17:19:06,094 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-13T17:19:06,297 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T17:19:06,302 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-13T17:19:06,303 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-13T17:19:06,303 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-13T17:19:06,303 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-13T17:19:06,303 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68946043{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/hadoop.log.dir/,AVAILABLE} 2024-12-13T17:19:06,304 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d57dd6e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-13T17:19:06,393 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@eb34e8b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/java.io.tmpdir/jetty-localhost-33075-hadoop-hdfs-3_4_1-tests_jar-_-any-16683605040150037620/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-13T17:19:06,394 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3fe50e48{HTTP/1.1, (http/1.1)}{localhost:33075} 2024-12-13T17:19:06,394 INFO [Time-limited test {}] server.Server(415): Started @119121ms 2024-12-13T17:19:06,395 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-13T17:19:06,432 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T17:19:06,436 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-13T17:19:06,440 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-13T17:19:06,440 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-13T17:19:06,440 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-13T17:19:06,441 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4d6430de{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/hadoop.log.dir/,AVAILABLE} 2024-12-13T17:19:06,441 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6b4f6ace{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-13T17:19:06,534 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@25ddc243{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/java.io.tmpdir/jetty-localhost-46103-hadoop-hdfs-3_4_1-tests_jar-_-any-2515175351702960025/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-13T17:19:06,534 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@537a7cfb{HTTP/1.1, (http/1.1)}{localhost:46103} 2024-12-13T17:19:06,535 INFO [Time-limited test {}] server.Server(415): Started @119262ms 2024-12-13T17:19:06,537 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-13T17:19:07,027 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data1/current/BP-2145113159-172.17.0.2-1734110345678/current, will proceed with Du for space computation calculation, 2024-12-13T17:19:07,027 WARN [Thread-466 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data2/current/BP-2145113159-172.17.0.2-1734110345678/current, will proceed with Du for space computation calculation, 2024-12-13T17:19:07,054 WARN [Thread-430 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-13T17:19:07,056 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x87ad834867f6b8fd with lease ID 0xe8161f9f6bfb5276: Processing first storage report for DS-78bc1bfb-b468-4433-986c-32784cc53682 from datanode DatanodeRegistration(127.0.0.1:43399, datanodeUuid=399ff96b-0b78-4ee6-9e59-a9b2b34e2e47, infoPort=42217, infoSecurePort=0, ipcPort=37543, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678) 2024-12-13T17:19:07,056 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x87ad834867f6b8fd with lease ID 0xe8161f9f6bfb5276: from storage DS-78bc1bfb-b468-4433-986c-32784cc53682 node DatanodeRegistration(127.0.0.1:43399, datanodeUuid=399ff96b-0b78-4ee6-9e59-a9b2b34e2e47, infoPort=42217, infoSecurePort=0, ipcPort=37543, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-13T17:19:07,057 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x87ad834867f6b8fd with lease ID 0xe8161f9f6bfb5276: Processing first storage report for DS-4d781ede-c31c-404b-9e2d-5b7e99135fcf from datanode DatanodeRegistration(127.0.0.1:43399, datanodeUuid=399ff96b-0b78-4ee6-9e59-a9b2b34e2e47, infoPort=42217, infoSecurePort=0, ipcPort=37543, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678) 2024-12-13T17:19:07,057 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x87ad834867f6b8fd with lease ID 0xe8161f9f6bfb5276: from storage DS-4d781ede-c31c-404b-9e2d-5b7e99135fcf node DatanodeRegistration(127.0.0.1:43399, datanodeUuid=399ff96b-0b78-4ee6-9e59-a9b2b34e2e47, infoPort=42217, infoSecurePort=0, ipcPort=37543, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-13T17:19:07,209 WARN [Thread-477 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data3/current/BP-2145113159-172.17.0.2-1734110345678/current, will proceed with Du for space computation calculation, 2024-12-13T17:19:07,209 WARN [Thread-478 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data4/current/BP-2145113159-172.17.0.2-1734110345678/current, will proceed with Du for space computation calculation, 2024-12-13T17:19:07,225 WARN [Thread-453 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-13T17:19:07,227 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xaf9286f4eccd35a2 with lease ID 0xe8161f9f6bfb5277: Processing first storage report for DS-ebf2f61b-23ad-4d5d-98d1-2d343a7ed7eb from datanode DatanodeRegistration(127.0.0.1:33729, datanodeUuid=8217e5cd-a926-4627-9cc3-4b6ed64b2cf6, infoPort=43067, infoSecurePort=0, ipcPort=42143, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678) 2024-12-13T17:19:07,227 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaf9286f4eccd35a2 with lease ID 0xe8161f9f6bfb5277: from storage DS-ebf2f61b-23ad-4d5d-98d1-2d343a7ed7eb node DatanodeRegistration(127.0.0.1:33729, datanodeUuid=8217e5cd-a926-4627-9cc3-4b6ed64b2cf6, infoPort=43067, infoSecurePort=0, ipcPort=42143, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-13T17:19:07,227 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xaf9286f4eccd35a2 with lease ID 0xe8161f9f6bfb5277: Processing first storage report for DS-7f4aae4e-4c5a-4ab9-b1f0-bd0b5c5ad36c from datanode DatanodeRegistration(127.0.0.1:33729, datanodeUuid=8217e5cd-a926-4627-9cc3-4b6ed64b2cf6, infoPort=43067, infoSecurePort=0, ipcPort=42143, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678) 2024-12-13T17:19:07,227 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaf9286f4eccd35a2 with lease ID 0xe8161f9f6bfb5277: from storage DS-7f4aae4e-4c5a-4ab9-b1f0-bd0b5c5ad36c node DatanodeRegistration(127.0.0.1:33729, datanodeUuid=8217e5cd-a926-4627-9cc3-4b6ed64b2cf6, infoPort=43067, infoSecurePort=0, ipcPort=42143, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-13T17:19:07,279 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751 2024-12-13T17:19:07,282 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/zookeeper_0, clientPort=64400, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-13T17:19:07,283 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=64400 2024-12-13T17:19:07,283 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T17:19:07,286 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T17:19:07,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741825_1001 (size=7) 2024-12-13T17:19:07,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33729 is added to blk_1073741825_1001 (size=7) 2024-12-13T17:19:07,299 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4 with version=8 2024-12-13T17:19:07,299 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/hbase-staging 2024-12-13T17:19:07,301 INFO [Time-limited test {}] client.ConnectionUtils(129): master/bcc0a2319104:0 server-side Connection retries=45 2024-12-13T17:19:07,301 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-13T17:19:07,302 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-13T17:19:07,302 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-13T17:19:07,302 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-13T17:19:07,302 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-13T17:19:07,302 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-13T17:19:07,302 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-13T17:19:07,303 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:41247 2024-12-13T17:19:07,303 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T17:19:07,305 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T17:19:07,307 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:41247 connecting to ZooKeeper ensemble=127.0.0.1:64400 2024-12-13T17:19:07,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:412470x0, 
quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-13T17:19:07,361 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41247-0x10020671c920000 connected 2024-12-13T17:19:07,427 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-13T17:19:07,429 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T17:19:07,429 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-13T17:19:07,430 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41247 2024-12-13T17:19:07,430 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41247 2024-12-13T17:19:07,430 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41247 2024-12-13T17:19:07,431 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41247 2024-12-13T17:19:07,431 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41247 2024-12-13T17:19:07,432 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4, hbase.cluster.distributed=false 2024-12-13T17:19:07,447 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/bcc0a2319104:0 server-side Connection retries=45 2024-12-13T17:19:07,447 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-13T17:19:07,447 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-13T17:19:07,447 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-13T17:19:07,447 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-13T17:19:07,447 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-13T17:19:07,447 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-13T17:19:07,447 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 
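The RpcExecutor records just above (numCallQueues=1, maxQueueLength=30, handlerCount=3 for default.FPBQ.Fifo and the other queues) reflect test-lowered RPC settings rather than production defaults. A minimal sketch, assuming the HBase 2.x configuration keys below (worth verifying against branch-2; the class name is hypothetical, and maxQueueLength is believed to default to 10x the handler count), of how a test harness would produce that queue shape:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hypothetical helper: produces the queue shape seen in the log
// (handlerCount=3, maxQueueLength=30). Key names assumed from HBase 2.x.
public final class RpcQueueConfSketch {
  public static Configuration lowHandlerConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.regionserver.handler.count", 3);       // -> handlerCount=3
    conf.setInt("hbase.ipc.server.max.callqueue.length", 30); // -> maxQueueLength=30
    return conf;
  }
}
```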
2024-12-13T17:19:07,448 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:43669 2024-12-13T17:19:07,448 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-13T17:19:07,449 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-13T17:19:07,450 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T17:19:07,452 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T17:19:07,454 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:43669 connecting to ZooKeeper ensemble=127.0.0.1:64400 2024-12-13T17:19:07,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:436690x0, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-13T17:19:07,461 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:436690x0, quorum=127.0.0.1:64400, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-13T17:19:07,461 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43669-0x10020671c920001 connected 2024-12-13T17:19:07,462 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43669-0x10020671c920001, quorum=127.0.0.1:64400, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T17:19:07,462 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43669-0x10020671c920001, quorum=127.0.0.1:64400, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-13T17:19:07,463 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43669 2024-12-13T17:19:07,464 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43669 2024-12-13T17:19:07,464 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43669 2024-12-13T17:19:07,465 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43669 2024-12-13T17:19:07,465 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43669 2024-12-13T17:19:07,465 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/bcc0a2319104,41247,1734110347301 2024-12-13T17:19:07,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-13T17:19:07,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x10020671c920001, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
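The repeated ZKUtil(113) lines above ("Set watcher on znode that does not yet exist") describe a watch-before-create pattern: an exists() probe registers the watch even when /hbase/master or /hbase/running is absent, so the client is notified by a later NodeCreated event. A minimal sketch against the stock org.apache.zookeeper client, using the ensemble address from this log (127.0.0.1:64400); the class name is illustrative:

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Illustrative client: exists() registers a watch even when the znode is
// absent, which is what the ZKUtil(113) lines above are reporting.
public final class ZkWatchSketch {
  public static void main(String[] args) throws Exception {
    Watcher watcher = (WatchedEvent e) ->
        System.out.println("event " + e.getType() + " on " + e.getPath());
    // Ensemble address taken from this log's MiniZooKeeperCluster.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:64400", 30_000, watcher);
    // Returns null while /hbase/master is absent, but the watch is set
    // either way, so a later NodeCreated event is still delivered.
    System.out.println("stat: " + zk.exists("/hbase/master", true));
  }
}
```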
2024-12-13T17:19:07,476 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/bcc0a2319104,41247,1734110347301 2024-12-13T17:19:07,479 DEBUG [M:0;bcc0a2319104:41247 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;bcc0a2319104:41247 2024-12-13T17:19:07,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x10020671c920001, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-13T17:19:07,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-13T17:19:07,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:19:07,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x10020671c920001, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:19:07,486 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-13T17:19:07,486 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/bcc0a2319104,41247,1734110347301 from backup master directory 2024-12-13T17:19:07,486 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-13T17:19:07,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/bcc0a2319104,41247,1734110347301 2024-12-13T17:19:07,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x10020671c920001, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-13T17:19:07,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-13T17:19:07,494 WARN [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
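The ActiveMasterManager records above show the election step: the candidate registers under /hbase/backup-masters, races to create /hbase/master, and on success deletes its backup entry (the NodeDeleted event logged for the backup-masters path). A hedged sketch of the underlying ZooKeeper pattern, not HBase's actual ActiveMasterManager code; the paths and class name are illustrative:

```java
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

// Illustrative election step: race to create the ephemeral master znode;
// the loser leaves a watch on it so it can retry when the winner goes away.
public final class MasterElectionSketch {
  static boolean tryBecomeActive(ZooKeeper zk, byte[] serverName) throws Exception {
    try {
      zk.create("/hbase/master", serverName,
          ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
      return true;  // active master; the ephemeral node expires with our session
    } catch (KeeperException.NodeExistsException e) {
      // Someone else won: watch for NodeDeleted so we can retry later.
      zk.exists("/hbase/master", true);
      return false;
    }
  }
}
```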
2024-12-13T17:19:07,494 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=bcc0a2319104,41247,1734110347301 2024-12-13T17:19:07,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741826_1002 (size=42) 2024-12-13T17:19:07,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33729 is added to blk_1073741826_1002 (size=42) 2024-12-13T17:19:07,508 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/hbase.id with ID: f04823e1-8e5e-4dac-9f25-c3ef4a42c30a 2024-12-13T17:19:07,521 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T17:19:07,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:19:07,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x10020671c920001, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:19:07,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33729 is added to blk_1073741827_1003 (size=196) 2024-12-13T17:19:07,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741827_1003 (size=196) 2024-12-13T17:19:07,545 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T17:19:07,547 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, 
flushIntervalMs=900000 2024-12-13T17:19:07,547 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-13T17:19:07,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741828_1004 (size=1189) 2024-12-13T17:19:07,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33729 is added to blk_1073741828_1004 (size=1189) 2024-12-13T17:19:07,556 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/data/master/store 2024-12-13T17:19:07,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33729 is added to blk_1073741829_1005 (size=34) 2024-12-13T17:19:07,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741829_1005 (size=34) 2024-12-13T17:19:07,563 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T17:19:07,563 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-13T17:19:07,563 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-13T17:19:07,563 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
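The MasterRegion/HRegion records above spell out the master:store schema as a flattened attribute dump (VERSIONS, IN_MEMORY, BLOCKSIZE, DATA_BLOCK_ENCODING, BLOOMFILTER per family). A sketch of how the logged 'info' family maps onto the HBase 2.x builder API; the mapping is my reading of the logged attributes, and the class name is illustrative:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: the 'info' family of master:store, rebuilt from the attributes
// in the log record above (VERSIONS => '3', IN_MEMORY => 'true', ...).
public final class MasterStoreSchemaSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                   // VERSIONS => '3'
            .setInMemory(true)                                   // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                              // BLOCKSIZE => 8KB
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .build())
        .build();
  }
}
```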
2024-12-13T17:19:07,563 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-13T17:19:07,564 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-13T17:19:07,564 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-13T17:19:07,564 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-13T17:19:07,564 WARN [master/bcc0a2319104:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/data/master/store/.initializing 2024-12-13T17:19:07,564 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301 2024-12-13T17:19:07,567 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bcc0a2319104%2C41247%2C1734110347301, suffix=, logDir=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301, archiveDir=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/oldWALs, maxLogs=10 2024-12-13T17:19:07,568 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C41247%2C1734110347301.1734110347568 2024-12-13T17:19:07,573 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 2024-12-13T17:19:07,573 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43067:43067),(127.0.0.1/127.0.0.1:42217:42217)] 2024-12-13T17:19:07,573 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-13T17:19:07,574 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T17:19:07,574 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-13T17:19:07,574 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-13T17:19:07,575 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 
1595e783b53d99cd5eef43b6debb2682 2024-12-13T17:19:07,578 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-13T17:19:07,578 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T17:19:07,578 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T17:19:07,578 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-13T17:19:07,580 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-13T17:19:07,580 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T17:19:07,581 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T17:19:07,581 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-13T17:19:07,582 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-13T17:19:07,583 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T17:19:07,583 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T17:19:07,584 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-13T17:19:07,586 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-13T17:19:07,586 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T17:19:07,587 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T17:19:07,588 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-13T17:19:07,588 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-13T17:19:07,590 DEBUG 
[master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-13T17:19:07,591 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-13T17:19:07,594 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T17:19:07,594 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=818852, jitterRate=0.0412241667509079}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-13T17:19:07,595 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-13T17:19:07,596 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-13T17:19:07,600 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a314eeb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T17:19:07,601 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-13T17:19:07,601 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-13T17:19:07,601 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-13T17:19:07,602 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-13T17:19:07,602 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec 2024-12-13T17:19:07,602 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec 2024-12-13T17:19:07,602 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-13T17:19:07,604 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
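The ProcedureExecutor line above states its own default sizing rule, "bigger of cpus/4 or 16"; since this run reports only 5 core workers, the test configuration evidently overrides it. A sketch of the quoted rule (the config key named in the comment is an assumption, not something this log shows):

    public class ProcWorkerSizingSketch {
        public static int coreWorkers() {
            // Rule quoted in the log: "bigger of cpus/4 or 16". The run above
            // reports 5, so the test evidently overrides the default count
            // (hbase.master.procedure.threads is the usual knob -- an assumption).
            int cpus = Runtime.getRuntime().availableProcessors();
            return Math.max(cpus / 4, 16);
        }
    }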
2024-12-13T17:19:07,605 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-13T17:19:07,610 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-13T17:19:07,610 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-13T17:19:07,611 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-13T17:19:07,618 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-13T17:19:07,619 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-13T17:19:07,620 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-13T17:19:07,627 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-13T17:19:07,628 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-13T17:19:07,635 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-13T17:19:07,639 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-13T17:19:07,643 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-13T17:19:07,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-13T17:19:07,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x10020671c920001, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-13T17:19:07,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:19:07,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x10020671c920001, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-13T17:19:07,653 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=bcc0a2319104,41247,1734110347301, sessionid=0x10020671c920000, setting cluster-up flag (Was=false) 2024-12-13T17:19:07,668 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:19:07,668 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x10020671c920001, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:19:07,694 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-13T17:19:07,696 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bcc0a2319104,41247,1734110347301 2024-12-13T17:19:07,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:19:07,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x10020671c920001, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:19:07,735 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-13T17:19:07,738 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bcc0a2319104,41247,1734110347301 2024-12-13T17:19:07,743 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-13T17:19:07,744 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-13T17:19:07,744 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
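The recurring ZKWatcher lines are ordinary ZooKeeper child/data watches on the /hbase base znode. A minimal standalone sketch against the quorum address from the log (127.0.0.1:64400), using only the stock ZooKeeper client API:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;

    public class HBaseZNodeWatchSketch {
        public static void main(String[] args) throws Exception {
            // Quorum and base znode taken from the log (127.0.0.1:64400, /hbase).
            ZooKeeper zk = new ZooKeeper("127.0.0.1:64400", 30_000, (WatchedEvent e) ->
                System.out.println("event type=" + e.getType()
                    + ", state=" + e.getState() + ", path=" + e.getPath()));
            // Arms a child watch on /hbase; this is what produces the
            // NodeChildrenChanged events seen throughout the log.
            zk.getChildren("/hbase", true);
            Thread.sleep(60_000); // wait for events, then shut down
            zk.close();
        }
    }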
2024-12-13T17:19:07,745 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: bcc0a2319104,41247,1734110347301 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-13T17:19:07,745 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/bcc0a2319104:0, corePoolSize=5, maxPoolSize=5 2024-12-13T17:19:07,745 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/bcc0a2319104:0, corePoolSize=5, maxPoolSize=5 2024-12-13T17:19:07,746 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/bcc0a2319104:0, corePoolSize=5, maxPoolSize=5 2024-12-13T17:19:07,746 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/bcc0a2319104:0, corePoolSize=5, maxPoolSize=5 2024-12-13T17:19:07,746 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/bcc0a2319104:0, corePoolSize=10, maxPoolSize=10 2024-12-13T17:19:07,746 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1 2024-12-13T17:19:07,746 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/bcc0a2319104:0, corePoolSize=2, maxPoolSize=2 2024-12-13T17:19:07,747 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1 2024-12-13T17:19:07,748 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1734110377748 2024-12-13T17:19:07,748 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-13T17:19:07,748 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-13T17:19:07,748 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-13T17:19:07,748 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-13T17:19:07,748 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-13T17:19:07,749 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-13T17:19:07,749 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] 
cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-13T17:19:07,749 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-13T17:19:07,749 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-13T17:19:07,749 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-13T17:19:07,749 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-13T17:19:07,749 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-13T17:19:07,750 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-13T17:19:07,750 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-13T17:19:07,750 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T17:19:07,750 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.large.0-1734110347750,5,FailOnTimeoutGroup] 2024-12-13T17:19:07,750 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-13T17:19:07,750 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.small.0-1734110347750,5,FailOnTimeoutGroup] 2024-12-13T17:19:07,751 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
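Both cleaner chains initialized above are pluggable. A sketch, assuming the standard hbase.master.logcleaner.plugins / hbase.master.hfilecleaner.plugins keys, wiring in a subset of the cleaners named in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CleanerConfigSketch {
        public static Configuration sketch() {
            Configuration conf = HBaseConfiguration.create();
            // The cleaner chains logged above are class lists; order matters.
            conf.set("hbase.master.logcleaner.plugins",
                "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
              + "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner");
            conf.set("hbase.master.hfilecleaner.plugins",
                "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner,"
              + "org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner");
            return conf;
        }
    }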
2024-12-13T17:19:07,751 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-13T17:19:07,751 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-13T17:19:07,751 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-13T17:19:07,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33729 is added to blk_1073741831_1007 (size=1039) 2024-12-13T17:19:07,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741831_1007 (size=1039) 2024-12-13T17:19:07,765 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-13T17:19:07,765 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4 2024-12-13T17:19:07,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33729 is added to blk_1073741832_1008 (size=32) 2024-12-13T17:19:07,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741832_1008 (size=32) 2024-12-13T17:19:07,781 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T17:19:07,783 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family info of region 1588230740 2024-12-13T17:19:07,783 DEBUG [RS:0;bcc0a2319104:43669 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;bcc0a2319104:43669 2024-12-13T17:19:07,784 INFO [RS:0;bcc0a2319104:43669 {}] regionserver.HRegionServer(1008): ClusterId : f04823e1-8e5e-4dac-9f25-c3ef4a42c30a 2024-12-13T17:19:07,785 DEBUG [RS:0;bcc0a2319104:43669 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-13T17:19:07,785 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-13T17:19:07,785 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T17:19:07,786 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T17:19:07,786 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-13T17:19:07,788 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-13T17:19:07,788 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T17:19:07,789 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T17:19:07,789 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-13T17:19:07,791 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-13T17:19:07,791 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T17:19:07,792 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T17:19:07,793 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/data/hbase/meta/1588230740 2024-12-13T17:19:07,794 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/data/hbase/meta/1588230740 2024-12-13T17:19:07,794 DEBUG [RS:0;bcc0a2319104:43669 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-13T17:19:07,794 DEBUG [RS:0;bcc0a2319104:43669 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-13T17:19:07,796 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
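The two FlushLargeStoresPolicy lines (32.0 M for master:store earlier, 16.0 M here for hbase:meta) apply the fallback rule they describe: divide the region's memstore flush size by its number of column families. Worked through for master:store, whose numbers appear earlier in this log:

    public class FlushLowerBoundSketch {
        public static void main(String[] args) {
            // Fallback rule stated by the FlushLargeStoresPolicy lines:
            // lower bound = region memstore flush size / number of families.
            long masterStoreFlushSize = 134_217_728L; // flushSize logged for master:store
            long lowerBound = masterStoreFlushSize / 4; // families: info, proc, rs, state
            System.out.println(lowerBound); // 33554432 -- the "32.0 M" and
            // flushSizeLowerBound=33554432 logged at region open
        }
    }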
2024-12-13T17:19:07,797 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-13T17:19:07,799 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T17:19:07,800 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=841409, jitterRate=0.06990793347358704}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-13T17:19:07,800 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-13T17:19:07,800 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-13T17:19:07,801 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-13T17:19:07,801 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-13T17:19:07,801 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-13T17:19:07,801 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-13T17:19:07,801 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-13T17:19:07,801 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-13T17:19:07,802 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-13T17:19:07,802 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-13T17:19:07,803 DEBUG [RS:0;bcc0a2319104:43669 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-13T17:19:07,803 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-13T17:19:07,803 DEBUG [RS:0;bcc0a2319104:43669 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e962c32, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T17:19:07,804 DEBUG [RS:0;bcc0a2319104:43669 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b521432, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bcc0a2319104/172.17.0.2:0 2024-12-13T17:19:07,804 INFO [RS:0;bcc0a2319104:43669 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-13T17:19:07,804 INFO [RS:0;bcc0a2319104:43669 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-13T17:19:07,804 DEBUG [RS:0;bcc0a2319104:43669 {}] regionserver.HRegionServer(1090): About to register with Master. 
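Once the assignment queued above completes, the master publishes hbase:meta's location in ZooKeeper (see the "Setting hbase:meta replicaId=0 location" and /hbase/meta-region-server lines further down). A sketch that reads that znode directly; the payload is protobuf-framed, so this only confirms the node exists rather than decoding it:

    import org.apache.zookeeper.ZooKeeper;

    public class MetaLocationSketch {
        public static void main(String[] args) throws Exception {
            ZooKeeper zk = new ZooKeeper("127.0.0.1:64400", 30_000, event -> { });
            // The master publishes hbase:meta's address here once assignment
            // finishes; the MetaRegionLocationCache lines below watch this path.
            byte[] data = zk.getData("/hbase/meta-region-server", false, null);
            System.out.println("meta location znode holds " + data.length + " bytes");
            zk.close();
        }
    }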
2024-12-13T17:19:07,804 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-13T17:19:07,805 INFO [RS:0;bcc0a2319104:43669 {}] regionserver.HRegionServer(3073): reportForDuty to master=bcc0a2319104,41247,1734110347301 with isa=bcc0a2319104/172.17.0.2:43669, startcode=1734110347446 2024-12-13T17:19:07,805 DEBUG [RS:0;bcc0a2319104:43669 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-13T17:19:07,805 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-13T17:19:07,807 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36943, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-13T17:19:07,808 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41247 {}] master.ServerManager(332): Checking decommissioned status of RegionServer bcc0a2319104,43669,1734110347446 2024-12-13T17:19:07,808 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41247 {}] master.ServerManager(486): Registering regionserver=bcc0a2319104,43669,1734110347446 2024-12-13T17:19:07,810 DEBUG [RS:0;bcc0a2319104:43669 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4 2024-12-13T17:19:07,810 DEBUG [RS:0;bcc0a2319104:43669 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:34581 2024-12-13T17:19:07,810 DEBUG [RS:0;bcc0a2319104:43669 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-13T17:19:07,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-13T17:19:07,925 DEBUG [RS:0;bcc0a2319104:43669 {}] zookeeper.ZKUtil(111): regionserver:43669-0x10020671c920001, quorum=127.0.0.1:64400, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/bcc0a2319104,43669,1734110347446 2024-12-13T17:19:07,925 WARN [RS:0;bcc0a2319104:43669 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
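The /hbase/rs entry watched above is the region server's ephemeral session znode; when the session dies, the znode vanishes and the master's RegionServerTracker reacts. A sketch of the same mechanism with the raw ZooKeeper API (the znode name here is made up for illustration):

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class EphemeralRsZNodeSketch {
        public static void main(String[] args) throws Exception {
            ZooKeeper zk = new ZooKeeper("127.0.0.1:64400", 30_000, event -> { });
            // Hypothetical znode name; real servers use host,port,startcode,
            // as in /hbase/rs/bcc0a2319104,43669,1734110347446 above. Being
            // EPHEMERAL, it is deleted automatically when the session ends.
            zk.create("/hbase/rs/example.host,16020,0", new byte[0],
                ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
            zk.close();
        }
    }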
2024-12-13T17:19:07,925 INFO [RS:0;bcc0a2319104:43669 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-13T17:19:07,926 DEBUG [RS:0;bcc0a2319104:43669 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446 2024-12-13T17:19:07,926 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [bcc0a2319104,43669,1734110347446] 2024-12-13T17:19:07,932 DEBUG [RS:0;bcc0a2319104:43669 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-13T17:19:07,933 INFO [RS:0;bcc0a2319104:43669 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-13T17:19:07,935 INFO [RS:0;bcc0a2319104:43669 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-13T17:19:07,936 INFO [RS:0;bcc0a2319104:43669 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-13T17:19:07,936 INFO [RS:0;bcc0a2319104:43669 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-13T17:19:07,936 INFO [RS:0;bcc0a2319104:43669 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-13T17:19:07,937 INFO [RS:0;bcc0a2319104:43669 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
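The MemStoreFlusher and compaction-throughput figures above are configuration-driven. A sketch of the relevant keys, assuming the standard names; the 880 M global limit above is the configured heap fraction applied to this test JVM's heap:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ThroughputConfigSketch {
        public static Configuration sketch() {
            Configuration conf = HBaseConfiguration.create();
            // Global memstore limit is a fraction of heap (default 0.4).
            conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
            // Bounds used by PressureAwareCompactionThroughputController;
            // 100 MB/s and 50 MB/s match the log line above.
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
            return conf;
        }
    }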
2024-12-13T17:19:07,937 DEBUG [RS:0;bcc0a2319104:43669 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1 2024-12-13T17:19:07,937 DEBUG [RS:0;bcc0a2319104:43669 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1 2024-12-13T17:19:07,937 DEBUG [RS:0;bcc0a2319104:43669 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1 2024-12-13T17:19:07,937 DEBUG [RS:0;bcc0a2319104:43669 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1 2024-12-13T17:19:07,937 DEBUG [RS:0;bcc0a2319104:43669 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1 2024-12-13T17:19:07,937 DEBUG [RS:0;bcc0a2319104:43669 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/bcc0a2319104:0, corePoolSize=2, maxPoolSize=2 2024-12-13T17:19:07,938 DEBUG [RS:0;bcc0a2319104:43669 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1 2024-12-13T17:19:07,938 DEBUG [RS:0;bcc0a2319104:43669 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1 2024-12-13T17:19:07,938 DEBUG [RS:0;bcc0a2319104:43669 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1 2024-12-13T17:19:07,938 DEBUG [RS:0;bcc0a2319104:43669 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1 2024-12-13T17:19:07,938 DEBUG [RS:0;bcc0a2319104:43669 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1 2024-12-13T17:19:07,938 DEBUG [RS:0;bcc0a2319104:43669 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/bcc0a2319104:0, corePoolSize=3, maxPoolSize=3 2024-12-13T17:19:07,938 DEBUG [RS:0;bcc0a2319104:43669 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0, corePoolSize=3, maxPoolSize=3 2024-12-13T17:19:07,939 INFO [RS:0;bcc0a2319104:43669 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-13T17:19:07,939 INFO [RS:0;bcc0a2319104:43669 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-13T17:19:07,939 INFO [RS:0;bcc0a2319104:43669 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-13T17:19:07,939 INFO [RS:0;bcc0a2319104:43669 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-13T17:19:07,939 INFO [RS:0;bcc0a2319104:43669 {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,43669,1734110347446-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-13T17:19:07,953 INFO [RS:0;bcc0a2319104:43669 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-13T17:19:07,954 INFO [RS:0;bcc0a2319104:43669 {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,43669,1734110347446-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-13T17:19:07,955 WARN [bcc0a2319104:41247 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 2024-12-13T17:19:07,969 INFO [RS:0;bcc0a2319104:43669 {}] regionserver.Replication(204): bcc0a2319104,43669,1734110347446 started 2024-12-13T17:19:07,969 INFO [RS:0;bcc0a2319104:43669 {}] regionserver.HRegionServer(1767): Serving as bcc0a2319104,43669,1734110347446, RpcServer on bcc0a2319104/172.17.0.2:43669, sessionid=0x10020671c920001 2024-12-13T17:19:07,970 DEBUG [RS:0;bcc0a2319104:43669 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-13T17:19:07,970 DEBUG [RS:0;bcc0a2319104:43669 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager bcc0a2319104,43669,1734110347446 2024-12-13T17:19:07,970 DEBUG [RS:0;bcc0a2319104:43669 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bcc0a2319104,43669,1734110347446' 2024-12-13T17:19:07,970 DEBUG [RS:0;bcc0a2319104:43669 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-13T17:19:07,970 DEBUG [RS:0;bcc0a2319104:43669 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-13T17:19:07,971 DEBUG [RS:0;bcc0a2319104:43669 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-13T17:19:07,971 DEBUG [RS:0;bcc0a2319104:43669 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-13T17:19:07,971 DEBUG [RS:0;bcc0a2319104:43669 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager bcc0a2319104,43669,1734110347446 2024-12-13T17:19:07,971 DEBUG [RS:0;bcc0a2319104:43669 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bcc0a2319104,43669,1734110347446' 2024-12-13T17:19:07,971 DEBUG [RS:0;bcc0a2319104:43669 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-13T17:19:07,971 DEBUG [RS:0;bcc0a2319104:43669 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-13T17:19:07,972 DEBUG [RS:0;bcc0a2319104:43669 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-13T17:19:07,972 INFO [RS:0;bcc0a2319104:43669 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-13T17:19:07,972 INFO [RS:0;bcc0a2319104:43669 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
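Both quota managers just reported "Quota support disabled", which is the default. A sketch of the switch that enables them (hbase.quota.enabled, assumed here to be the standard key):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaConfigSketch {
        public static Configuration sketch() {
            Configuration conf = HBaseConfiguration.create();
            // Turns on RPC and space quota support (default false), which the
            // RegionServerRpcQuotaManager lines above report as disabled.
            conf.setBoolean("hbase.quota.enabled", true);
            return conf;
        }
    }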
2024-12-13T17:19:08,075 INFO [RS:0;bcc0a2319104:43669 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bcc0a2319104%2C43669%2C1734110347446, suffix=, logDir=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446, archiveDir=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/oldWALs, maxLogs=32 2024-12-13T17:19:08,078 INFO [RS:0;bcc0a2319104:43669 {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C43669%2C1734110347446.1734110348078 2024-12-13T17:19:08,087 INFO [RS:0;bcc0a2319104:43669 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 2024-12-13T17:19:08,088 DEBUG [RS:0;bcc0a2319104:43669 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42217:42217),(127.0.0.1/127.0.0.1:43067:43067)] 2024-12-13T17:19:08,206 DEBUG [bcc0a2319104:41247 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-13T17:19:08,206 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=bcc0a2319104,43669,1734110347446 2024-12-13T17:19:08,207 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bcc0a2319104,43669,1734110347446, state=OPENING 2024-12-13T17:19:08,269 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-13T17:19:08,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x10020671c920001, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:19:08,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:19:08,279 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=bcc0a2319104,43669,1734110347446}] 2024-12-13T17:19:08,279 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-13T17:19:08,279 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-13T17:19:08,435 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bcc0a2319104,43669,1734110347446 2024-12-13T17:19:08,435 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-13T17:19:08,437 INFO [RS-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47120, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-13T17:19:08,441 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-13T17:19:08,442 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating 
WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-13T17:19:08,444 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bcc0a2319104%2C43669%2C1734110347446.meta, suffix=.meta, logDir=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446, archiveDir=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/oldWALs, maxLogs=32 2024-12-13T17:19:08,446 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta 2024-12-13T17:19:08,453 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta 2024-12-13T17:19:08,453 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42217:42217),(127.0.0.1/127.0.0.1:43067:43067)] 2024-12-13T17:19:08,453 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-13T17:19:08,453 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-13T17:19:08,454 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-13T17:19:08,454 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
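hbase:meta carries org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint through the '|class|priority|' spec visible in its table descriptor earlier in this log. For a user table, the same attachment goes through the descriptor builder; a sketch with a hypothetical table name:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorAttachSketch {
        public static TableDescriptor sketch() throws IOException {
            // "example" is a placeholder table name; the coprocessor class is
            // the one the log shows being loaded for hbase:meta.
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
                .setCoprocessor(
                    "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
                .build();
        }
    }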
2024-12-13T17:19:08,454 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-12-13T17:19:08,454 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:19:08,454 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740
2024-12-13T17:19:08,454 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740
2024-12-13T17:19:08,456 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-13T17:19:08,457 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-13T17:19:08,457 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:19:08,458 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-13T17:19:08,458 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-13T17:19:08,459 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-13T17:19:08,459 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:19:08,459 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-13T17:19:08,460 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-13T17:19:08,460 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-13T17:19:08,460 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:19:08,461 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-13T17:19:08,462 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/data/hbase/meta/1588230740
2024-12-13T17:19:08,464 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/data/hbase/meta/1588230740
2024-12-13T17:19:08,465 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
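The repeated CompactionConfiguration records above show the per-family compaction knobs in effect (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, major period of 7 days with 0.5 jitter). A sketch of the corresponding standard configuration keys, assuming HBase 2.x; these are the stock key names, set here only to mirror the logged values:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
        public static Configuration compactionTuning() {
            Configuration conf = HBaseConfiguration.create();
            conf.setInt("hbase.hstore.compaction.min", 3);          // minFilesToCompact
            conf.setInt("hbase.hstore.compaction.max", 10);         // maxFilesToCompact
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);   // selection ratio
            conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
            conf.setLong("hbase.hregion.majorcompaction", 604_800_000L); // 7 days in ms
            conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
            return conf;
        }
    }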
2024-12-13T17:19:08,467 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-13T17:19:08,468 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=705659, jitterRate=-0.10270854830741882}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-13T17:19:08,469 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-12-13T17:19:08,470 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1734110348435
2024-12-13T17:19:08,472 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740
2024-12-13T17:19:08,473 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740
2024-12-13T17:19:08,473 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=bcc0a2319104,43669,1734110347446
2024-12-13T17:19:08,474 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bcc0a2319104,43669,1734110347446, state=OPEN
2024-12-13T17:19:08,502 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-13T17:19:08,502 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x10020671c920001, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-13T17:19:08,502 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-13T17:19:08,502 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-13T17:19:08,505 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2
2024-12-13T17:19:08,505 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=bcc0a2319104,43669,1734110347446 in 223 msec
2024-12-13T17:19:08,507 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1
2024-12-13T17:19:08,508 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 702 msec
2024-12-13T17:19:08,510 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 767 msec
2024-12-13T17:19:08,510 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1734110348510, completionTime=-1
2024-12-13T17:19:08,510 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running
2024-12-13T17:19:08,510 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster...
2024-12-13T17:19:08,511 DEBUG [hconnection-0x739dbc94-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-13T17:19:08,512 INFO [RS-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47124, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-13T17:19:08,513 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1
2024-12-13T17:19:08,513 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1734110408513
2024-12-13T17:19:08,513 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1734110468513
2024-12-13T17:19:08,513 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 3 msec
2024-12-13T17:19:08,536 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,41247,1734110347301-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-13T17:19:08,536 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,41247,1734110347301-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-13T17:19:08,536 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,41247,1734110347301-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-13T17:19:08,536 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-bcc0a2319104:41247, period=300000, unit=MILLISECONDS is enabled.
2024-12-13T17:19:08,536 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-12-13T17:19:08,537 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating...
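The ChoreService records above are the master scheduling its periodic chores (balancer, normalizer, catalog janitor, and so on). A sketch of the same mechanism with a hypothetical chore, assuming the ScheduledChore/ChoreService API as it exists in HBase 2.x; the chore name and body are illustrative only:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
        // Hypothetical chore; the default period unit is milliseconds.
        static final class HealthPingChore extends ScheduledChore {
            HealthPingChore(Stoppable stopper) {
                super("HealthPingChore", stopper, 60_000);
            }
            @Override
            protected void chore() {
                // periodic work would go here
            }
        }

        public static void schedule(Stoppable stopper) {
            ChoreService service = new ChoreService("sketch-");
            service.scheduleChore(new HealthPingChore(stopper));
        }
    }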
2024-12-13T17:19:08,537 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-13T17:19:08,538 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace
2024-12-13T17:19:08,539 DEBUG [master/bcc0a2319104:0.Chore.1 {}] janitor.CatalogJanitor(179):
2024-12-13T17:19:08,540 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION
2024-12-13T17:19:08,540 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:19:08,541 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-13T17:19:08,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741835_1011 (size=358)
2024-12-13T17:19:08,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33729 is added to blk_1073741835_1011 (size=358)
2024-12-13T17:19:08,551 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => ec823748c41d7535bc958c58981d1660, NAME => 'hbase:namespace,,1734110348537.ec823748c41d7535bc958c58981d1660.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4
2024-12-13T17:19:08,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33729 is added to blk_1073741836_1012 (size=42)
2024-12-13T17:19:08,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741836_1012 (size=42)
2024-12-13T17:19:08,559 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734110348537.ec823748c41d7535bc958c58981d1660.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:19:08,559 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing ec823748c41d7535bc958c58981d1660, disabling compactions & flushes
2024-12-13T17:19:08,559 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734110348537.ec823748c41d7535bc958c58981d1660.
2024-12-13T17:19:08,560 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734110348537.ec823748c41d7535bc958c58981d1660.
2024-12-13T17:19:08,560 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734110348537.ec823748c41d7535bc958c58981d1660. after waiting 0 ms
2024-12-13T17:19:08,560 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734110348537.ec823748c41d7535bc958c58981d1660.
2024-12-13T17:19:08,560 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1734110348537.ec823748c41d7535bc958c58981d1660.
2024-12-13T17:19:08,560 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for ec823748c41d7535bc958c58981d1660:
2024-12-13T17:19:08,561 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META
2024-12-13T17:19:08,561 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1734110348537.ec823748c41d7535bc958c58981d1660.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1734110348561"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734110348561"}]},"ts":"1734110348561"}
2024-12-13T17:19:08,564 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta.
2024-12-13T17:19:08,566 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-13T17:19:08,566 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734110348566"}]},"ts":"1734110348566"}
2024-12-13T17:19:08,568 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta
2024-12-13T17:19:08,585 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=ec823748c41d7535bc958c58981d1660, ASSIGN}]
2024-12-13T17:19:08,587 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=ec823748c41d7535bc958c58981d1660, ASSIGN
2024-12-13T17:19:08,589 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=ec823748c41d7535bc958c58981d1660, ASSIGN; state=OFFLINE, location=bcc0a2319104,43669,1734110347446; forceNewPlan=false, retain=false
2024-12-13T17:19:08,739 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=ec823748c41d7535bc958c58981d1660, regionState=OPENING, regionLocation=bcc0a2319104,43669,1734110347446
2024-12-13T17:19:08,744 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure ec823748c41d7535bc958c58981d1660, server=bcc0a2319104,43669,1734110347446}]
2024-12-13T17:19:08,900 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bcc0a2319104,43669,1734110347446
2024-12-13T17:19:08,903 INFO [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1734110348537.ec823748c41d7535bc958c58981d1660.
2024-12-13T17:19:08,904 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => ec823748c41d7535bc958c58981d1660, NAME => 'hbase:namespace,,1734110348537.ec823748c41d7535bc958c58981d1660.', STARTKEY => '', ENDKEY => ''}
2024-12-13T17:19:08,904 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace ec823748c41d7535bc958c58981d1660
2024-12-13T17:19:08,904 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734110348537.ec823748c41d7535bc958c58981d1660.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:19:08,904 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for ec823748c41d7535bc958c58981d1660
2024-12-13T17:19:08,904 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for ec823748c41d7535bc958c58981d1660
2024-12-13T17:19:08,906 INFO [StoreOpener-ec823748c41d7535bc958c58981d1660-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region ec823748c41d7535bc958c58981d1660
2024-12-13T17:19:08,908 INFO [StoreOpener-ec823748c41d7535bc958c58981d1660-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ec823748c41d7535bc958c58981d1660 columnFamilyName info
2024-12-13T17:19:08,908 DEBUG [StoreOpener-ec823748c41d7535bc958c58981d1660-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:19:08,909 INFO [StoreOpener-ec823748c41d7535bc958c58981d1660-1 {}] regionserver.HStore(327): Store=ec823748c41d7535bc958c58981d1660/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-13T17:19:08,910 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/data/hbase/namespace/ec823748c41d7535bc958c58981d1660
2024-12-13T17:19:08,910 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/data/hbase/namespace/ec823748c41d7535bc958c58981d1660
2024-12-13T17:19:08,913 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for ec823748c41d7535bc958c58981d1660
2024-12-13T17:19:08,916 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/data/hbase/namespace/ec823748c41d7535bc958c58981d1660/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-13T17:19:08,917 INFO [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened ec823748c41d7535bc958c58981d1660; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=832650, jitterRate=0.05876933038234711}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-13T17:19:08,917 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for ec823748c41d7535bc958c58981d1660:
2024-12-13T17:19:08,918 INFO [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1734110348537.ec823748c41d7535bc958c58981d1660., pid=6, masterSystemTime=1734110348900
2024-12-13T17:19:08,920 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1734110348537.ec823748c41d7535bc958c58981d1660.
2024-12-13T17:19:08,920 INFO [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1734110348537.ec823748c41d7535bc958c58981d1660.
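The CreateTableProcedure above is the master-side execution of a create-table request; the descriptor it logs (family 'info', VERSIONS => '10', IN_MEMORY => 'true', BLOCKSIZE => '8192 B (8KB)') can be expressed with the standard 2.x client builders. A sketch mirroring that descriptor for a hypothetical user table ("demo:settings" is an assumed name; system tables like hbase:namespace are created internally, not by clients):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
        public static void create(Connection conn) throws Exception {
            try (Admin admin = conn.getAdmin()) {
                admin.createTable(TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("demo", "settings"))
                    .setColumnFamily(ColumnFamilyDescriptorBuilder
                        .newBuilder(Bytes.toBytes("info"))
                        .setMaxVersions(10)   // VERSIONS => '10'
                        .setInMemory(true)    // IN_MEMORY => 'true'
                        .setBlocksize(8192)   // BLOCKSIZE => '8192 B (8KB)'
                        .build())
                    .build());
            }
        }
    }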
2024-12-13T17:19:08,921 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=ec823748c41d7535bc958c58981d1660, regionState=OPEN, openSeqNum=2, regionLocation=bcc0a2319104,43669,1734110347446
2024-12-13T17:19:08,926 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5
2024-12-13T17:19:08,927 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure ec823748c41d7535bc958c58981d1660, server=bcc0a2319104,43669,1734110347446 in 179 msec
2024-12-13T17:19:08,928 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4
2024-12-13T17:19:08,928 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=ec823748c41d7535bc958c58981d1660, ASSIGN in 341 msec
2024-12-13T17:19:08,929 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-13T17:19:08,930 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734110348929"}]},"ts":"1734110348929"}
2024-12-13T17:19:08,932 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta
2024-12-13T17:19:08,943 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace
2024-12-13T17:19:08,944 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION
2024-12-13T17:19:08,946 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 407 msec
2024-12-13T17:19:08,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace
2024-12-13T17:19:08,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:19:08,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x10020671c920001, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:19:08,959 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default
2024-12-13T17:19:08,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace
2024-12-13T17:19:08,988 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 28 msec
2024-12-13T17:19:08,992 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase
2024-12-13T17:19:09,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace
2024-12-13T17:19:09,021 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 30 msec
2024-12-13T17:19:09,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default
2024-12-13T17:19:09,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase
2024-12-13T17:19:09,060 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 1.566sec
2024-12-13T17:19:09,061 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-12-13T17:19:09,061 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-12-13T17:19:09,061 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-12-13T17:19:09,061 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-12-13T17:19:09,061 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-12-13T17:19:09,061 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,41247,1734110347301-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-13T17:19:09,061 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,41247,1734110347301-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-12-13T17:19:09,063 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds
2024-12-13T17:19:09,063 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-12-13T17:19:09,063 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,41247,1734110347301-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
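The two CreateNamespaceProcedure runs above (pid=7 for "default", pid=8 for "hbase") are what the master executes at startup; the client-side equivalent for a user namespace is a single Admin call. A sketch, assuming the standard 2.x Admin API; "demo" is a hypothetical namespace name:

    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;

    public class NamespaceSketch {
        public static void create(Admin admin) throws Exception {
            // Triggers a CreateNamespaceProcedure on the master, like pid=7/8 above.
            admin.createNamespace(NamespaceDescriptor.create("demo").build());
        }
    }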
2024-12-13T17:19:09,069 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6ec1b9dd to 127.0.0.1:64400 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@102b78be
2024-12-13T17:19:09,078 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15a10a64, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-13T17:19:09,080 DEBUG [hconnection-0x4416ea7c-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-13T17:19:09,081 INFO [RS-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47126, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-13T17:19:09,084 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=bcc0a2319104,41247,1734110347301
2024-12-13T17:19:09,084 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-13T17:19:09,087 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false
2024-12-13T17:19:09,102 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/bcc0a2319104:0 server-side Connection retries=45
2024-12-13T17:19:09,102 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-13T17:19:09,102 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-13T17:19:09,102 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-13T17:19:09,102 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-13T17:19:09,102 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-13T17:19:09,103 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-13T17:19:09,103 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-13T17:19:09,104 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:42631
2024-12-13T17:19:09,104 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-13T17:19:09,104 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-13T17:19:09,105 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-13T17:19:09,107 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-13T17:19:09,110 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:42631 connecting to ZooKeeper ensemble=127.0.0.1:64400
2024-12-13T17:19:09,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:426310x0, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-13T17:19:09,119 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:426310x0, quorum=127.0.0.1:64400, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-13T17:19:09,119 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42631-0x10020671c920003 connected
2024-12-13T17:19:09,121 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:42631-0x10020671c920003, quorum=127.0.0.1:64400, baseZNode=/hbase Set watcher on existing znode=/hbase/running
2024-12-13T17:19:09,121 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42631-0x10020671c920003, quorum=127.0.0.1:64400, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-13T17:19:09,122 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42631
2024-12-13T17:19:09,122 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42631
2024-12-13T17:19:09,122 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42631
2024-12-13T17:19:09,123 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42631
2024-12-13T17:19:09,125 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42631
2024-12-13T17:19:09,127 DEBUG [pool-282-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT
2024-12-13T17:19:09,137 DEBUG [RS:1;bcc0a2319104:42631 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;bcc0a2319104:42631
2024-12-13T17:19:09,138 INFO [RS:1;bcc0a2319104:42631 {}] regionserver.HRegionServer(1008): ClusterId : f04823e1-8e5e-4dac-9f25-c3ef4a42c30a
2024-12-13T17:19:09,138 DEBUG [RS:1;bcc0a2319104:42631 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-12-13T17:19:09,145 DEBUG [RS:1;bcc0a2319104:42631 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-12-13T17:19:09,145 DEBUG [RS:1;bcc0a2319104:42631 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-12-13T17:19:09,152 DEBUG [RS:1;bcc0a2319104:42631 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-12-13T17:19:09,153 DEBUG [RS:1;bcc0a2319104:42631 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@315e676e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-13T17:19:09,153 DEBUG [RS:1;bcc0a2319104:42631 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@689dcbd2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bcc0a2319104/172.17.0.2:0
2024-12-13T17:19:09,153 INFO [RS:1;bcc0a2319104:42631 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled
2024-12-13T17:19:09,153 INFO [RS:1;bcc0a2319104:42631 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled
2024-12-13T17:19:09,153 DEBUG [RS:1;bcc0a2319104:42631 {}] regionserver.HRegionServer(1090): About to register with Master.
2024-12-13T17:19:09,154 INFO [RS:1;bcc0a2319104:42631 {}] regionserver.HRegionServer(3073): reportForDuty to master=bcc0a2319104,41247,1734110347301 with isa=bcc0a2319104/172.17.0.2:42631, startcode=1734110349102
2024-12-13T17:19:09,154 DEBUG [RS:1;bcc0a2319104:42631 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-13T17:19:09,156 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41187, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService
2024-12-13T17:19:09,156 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41247 {}] master.ServerManager(332): Checking decommissioned status of RegionServer bcc0a2319104,42631,1734110349102
2024-12-13T17:19:09,156 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41247 {}] master.ServerManager(486): Registering regionserver=bcc0a2319104,42631,1734110349102
2024-12-13T17:19:09,158 DEBUG [RS:1;bcc0a2319104:42631 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4
2024-12-13T17:19:09,158 DEBUG [RS:1;bcc0a2319104:42631 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:34581
2024-12-13T17:19:09,158 DEBUG [RS:1;bcc0a2319104:42631 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1
2024-12-13T17:19:09,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-13T17:19:09,169 DEBUG [RS:1;bcc0a2319104:42631 {}] zookeeper.ZKUtil(111): regionserver:42631-0x10020671c920003, quorum=127.0.0.1:64400, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/bcc0a2319104,42631,1734110349102
2024-12-13T17:19:09,169 WARN [RS:1;bcc0a2319104:42631 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
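The ReadOnlyZKClient record above shows the quorum a client points at in this run (127.0.0.1:64400). A minimal client connection sketch against that quorum, assuming the standard 2.x client API; the quorum host and port are taken from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClientConnectSketch {
        public static Connection connect() throws Exception {
            Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");
            conf.set("hbase.zookeeper.property.clientPort", "64400");
            // Locates the active master and meta via ZooKeeper, as the log shows.
            return ConnectionFactory.createConnection(conf);
        }
    }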
2024-12-13T17:19:09,169 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [bcc0a2319104,42631,1734110349102]
2024-12-13T17:19:09,169 INFO [RS:1;bcc0a2319104:42631 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-13T17:19:09,170 DEBUG [RS:1;bcc0a2319104:42631 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,42631,1734110349102
2024-12-13T17:19:09,175 DEBUG [RS:1;bcc0a2319104:42631 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds
2024-12-13T17:19:09,175 INFO [RS:1;bcc0a2319104:42631 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-12-13T17:19:09,177 INFO [RS:1;bcc0a2319104:42631 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-12-13T17:19:09,178 INFO [RS:1;bcc0a2319104:42631 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-13T17:19:09,178 INFO [RS:1;bcc0a2319104:42631 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-13T17:19:09,178 INFO [RS:1;bcc0a2319104:42631 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S
2024-12-13T17:19:09,179 INFO [RS:1;bcc0a2319104:42631 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-12-13T17:19:09,179 DEBUG [RS:1;bcc0a2319104:42631 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:19:09,179 DEBUG [RS:1;bcc0a2319104:42631 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:19:09,180 DEBUG [RS:1;bcc0a2319104:42631 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:19:09,180 DEBUG [RS:1;bcc0a2319104:42631 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:19:09,180 DEBUG [RS:1;bcc0a2319104:42631 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:19:09,180 DEBUG [RS:1;bcc0a2319104:42631 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/bcc0a2319104:0, corePoolSize=2, maxPoolSize=2
2024-12-13T17:19:09,180 DEBUG [RS:1;bcc0a2319104:42631 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:19:09,180 DEBUG [RS:1;bcc0a2319104:42631 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:19:09,180 DEBUG [RS:1;bcc0a2319104:42631 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:19:09,180 DEBUG [RS:1;bcc0a2319104:42631 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:19:09,180 DEBUG [RS:1;bcc0a2319104:42631 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:19:09,180 DEBUG [RS:1;bcc0a2319104:42631 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/bcc0a2319104:0, corePoolSize=3, maxPoolSize=3
2024-12-13T17:19:09,180 DEBUG [RS:1;bcc0a2319104:42631 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0, corePoolSize=3, maxPoolSize=3
2024-12-13T17:19:09,181 INFO [RS:1;bcc0a2319104:42631 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-12-13T17:19:09,181 INFO [RS:1;bcc0a2319104:42631 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-12-13T17:19:09,181 INFO [RS:1;bcc0a2319104:42631 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-12-13T17:19:09,181 INFO [RS:1;bcc0a2319104:42631 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-12-13T17:19:09,181 INFO [RS:1;bcc0a2319104:42631 {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,42631,1734110349102-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-13T17:19:09,198 INFO [RS:1;bcc0a2319104:42631 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-12-13T17:19:09,198 INFO [RS:1;bcc0a2319104:42631 {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,42631,1734110349102-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-13T17:19:09,212 INFO [RS:1;bcc0a2319104:42631 {}] regionserver.Replication(204): bcc0a2319104,42631,1734110349102 started
2024-12-13T17:19:09,212 INFO [RS:1;bcc0a2319104:42631 {}] regionserver.HRegionServer(1767): Serving as bcc0a2319104,42631,1734110349102, RpcServer on bcc0a2319104/172.17.0.2:42631, sessionid=0x10020671c920003
2024-12-13T17:19:09,213 DEBUG [RS:1;bcc0a2319104:42631 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-12-13T17:19:09,213 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3355): Started new server=Thread[RS:1;bcc0a2319104:42631,5,FailOnTimeoutGroup]
2024-12-13T17:19:09,213 DEBUG [RS:1;bcc0a2319104:42631 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager bcc0a2319104,42631,1734110349102
2024-12-13T17:19:09,213 DEBUG [RS:1;bcc0a2319104:42631 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bcc0a2319104,42631,1734110349102'
2024-12-13T17:19:09,213 DEBUG [RS:1;bcc0a2319104:42631 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-12-13T17:19:09,213 INFO [Time-limited test {}] wal.TestLogRolling(191): Replication=2
2024-12-13T17:19:09,213 DEBUG [RS:1;bcc0a2319104:42631 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-12-13T17:19:09,214 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false
2024-12-13T17:19:09,214 DEBUG [RS:1;bcc0a2319104:42631 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-12-13T17:19:09,214 DEBUG [RS:1;bcc0a2319104:42631 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-12-13T17:19:09,214 DEBUG [RS:1;bcc0a2319104:42631 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager bcc0a2319104,42631,1734110349102
2024-12-13T17:19:09,214 DEBUG [RS:1;bcc0a2319104:42631 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bcc0a2319104,42631,1734110349102'
2024-12-13T17:19:09,214 DEBUG [RS:1;bcc0a2319104:42631 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-12-13T17:19:09,215 DEBUG [RS:1;bcc0a2319104:42631 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-13T17:19:09,215 DEBUG [RS:1;bcc0a2319104:42631 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-13T17:19:09,215 INFO [RS:1;bcc0a2319104:42631 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-13T17:19:09,215 INFO [RS:1;bcc0a2319104:42631 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
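The HBaseTestingUtility(3355) record above is the test adding a second region server (RS:1) to an already-running mini cluster. A sketch of that flow, assuming the HBaseTestingUtility API these tests use; the exact test body is omitted:

    import org.apache.hadoop.hbase.HBaseTestingUtility;

    public class MiniClusterSketch {
        public static void main(String[] args) throws Exception {
            HBaseTestingUtility util = new HBaseTestingUtility();
            util.startMiniCluster(1);                       // master + RS:0
            util.getMiniHBaseCluster().startRegionServer(); // adds RS:1, as logged above
            // ... test body would run here ...
            util.shutdownMiniCluster();
        }
    }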
2024-12-13T17:19:09,216 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37498, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-12-13T17:19:09,217 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41247 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions.
2024-12-13T17:19:09,217 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41247 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing.
2024-12-13T17:19:09,218 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41247 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-13T17:19:09,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41247 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath
2024-12-13T17:19:09,221 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION
2024-12-13T17:19:09,221 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:19:09,221 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41247 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 9
2024-12-13T17:19:09,222 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-13T17:19:09,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41247 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9
2024-12-13T17:19:09,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741837_1013 (size=393)
2024-12-13T17:19:09,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33729 is added to blk_1073741837_1013 (size=393)
2024-12-13T17:19:09,234 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 4ae54d53dadb6f7d5f47a16f2daa01e3, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1734110349217.4ae54d53dadb6f7d5f47a16f2daa01e3.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4
2024-12-13T17:19:09,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33729 is added to blk_1073741838_1014 (size=76)
2024-12-13T17:19:09,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43399 is added to blk_1073741838_1014 (size=76)
2024-12-13T17:19:09,242 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1734110349217.4ae54d53dadb6f7d5f47a16f2daa01e3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:19:09,242 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1681): Closing 4ae54d53dadb6f7d5f47a16f2daa01e3, disabling compactions & flushes
2024-12-13T17:19:09,242 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1734110349217.4ae54d53dadb6f7d5f47a16f2daa01e3.
2024-12-13T17:19:09,242 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1734110349217.4ae54d53dadb6f7d5f47a16f2daa01e3.
2024-12-13T17:19:09,242 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1734110349217.4ae54d53dadb6f7d5f47a16f2daa01e3. after waiting 0 ms
2024-12-13T17:19:09,242 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1734110349217.4ae54d53dadb6f7d5f47a16f2daa01e3.
2024-12-13T17:19:09,242 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1734110349217.4ae54d53dadb6f7d5f47a16f2daa01e3.
2024-12-13T17:19:09,242 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1635): Region close journal for 4ae54d53dadb6f7d5f47a16f2daa01e3:
2024-12-13T17:19:09,244 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META
2024-12-13T17:19:09,244 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1734110349217.4ae54d53dadb6f7d5f47a16f2daa01e3.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1734110349244"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734110349244"}]},"ts":"1734110349244"}
2024-12-13T17:19:09,246 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta.
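The two TableDescriptorChecker warnings above fire because the test deliberately creates its table with a 768 KB max file size and an 8 KB memstore flush size, forcing frequent flushes, rolls, and splits. A sketch of a descriptor that would trip the same checks, assuming the standard 2.x builders; the table name is the one from this log, the rest mirrors the logged descriptor:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SmallLimitsTableSketch {
        public static void create(Admin admin) throws Exception {
            admin.createTable(TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
                .setMaxFileSize(786432)       // trips the MAX_FILESIZE warning above
                .setMemStoreFlushSize(8192)   // trips the MEMSTORE_FLUSHSIZE warning above
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                .build());
        }
    }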
2024-12-13T17:19:09,248 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T17:19:09,248 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734110349248"}]},"ts":"1734110349248"} 2024-12-13T17:19:09,250 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-12-13T17:19:09,269 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=4ae54d53dadb6f7d5f47a16f2daa01e3, ASSIGN}] 2024-12-13T17:19:09,271 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=4ae54d53dadb6f7d5f47a16f2daa01e3, ASSIGN 2024-12-13T17:19:09,272 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=4ae54d53dadb6f7d5f47a16f2daa01e3, ASSIGN; state=OFFLINE, location=bcc0a2319104,43669,1734110347446; forceNewPlan=false, retain=false 2024-12-13T17:19:09,317 INFO [RS:1;bcc0a2319104:42631 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bcc0a2319104%2C42631%2C1734110349102, suffix=, logDir=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,42631,1734110349102, archiveDir=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/oldWALs, maxLogs=32 2024-12-13T17:19:09,319 INFO [RS:1;bcc0a2319104:42631 {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C42631%2C1734110349102.1734110349319 2024-12-13T17:19:09,326 INFO [RS:1;bcc0a2319104:42631 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,42631,1734110349102/bcc0a2319104%2C42631%2C1734110349102.1734110349319 2024-12-13T17:19:09,326 DEBUG [RS:1;bcc0a2319104:42631 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42217:42217),(127.0.0.1/127.0.0.1:43067:43067)] 2024-12-13T17:19:09,424 INFO [bcc0a2319104:41247 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 
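
CreateTableProcedure is advancing through its state machine above (CREATE_TABLE_PRE_OPERATION, CREATE_TABLE_WRITE_FS_LAYOUT, CREATE_TABLE_ADD_TO_META, and here CREATE_TABLE_ASSIGN_REGIONS) while the client periodically asks the master "Checking to see if procedure is done pid=9" until the "procId: 9 completed" line further down. On the client that polling is hidden behind a future; a hedged sketch, reusing td and admin from the previous sketch:

    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;

    // Master stores the procedure (the "Stored pid=9" line) and returns a handle immediately.
    Future<Void> create = admin.createTableAsync(td, null);  // null = no pre-split keys
    // HBaseAdmin$TableFuture then polls MasterRpcServices ("Checking to see if procedure is
    // done pid=9") until the procedure reaches SUCCESS and "Operation: CREATE ... completed" is logged.
    create.get(60, TimeUnit.SECONDS);
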
2024-12-13T17:19:09,425 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=4ae54d53dadb6f7d5f47a16f2daa01e3, regionState=OPENING, regionLocation=bcc0a2319104,43669,1734110347446 2024-12-13T17:19:09,429 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 4ae54d53dadb6f7d5f47a16f2daa01e3, server=bcc0a2319104,43669,1734110347446}] 2024-12-13T17:19:09,585 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bcc0a2319104,43669,1734110347446 2024-12-13T17:19:09,594 INFO [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRollOnDatanodeDeath,,1734110349217.4ae54d53dadb6f7d5f47a16f2daa01e3. 2024-12-13T17:19:09,595 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 4ae54d53dadb6f7d5f47a16f2daa01e3, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1734110349217.4ae54d53dadb6f7d5f47a16f2daa01e3.', STARTKEY => '', ENDKEY => ''} 2024-12-13T17:19:09,596 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 4ae54d53dadb6f7d5f47a16f2daa01e3 2024-12-13T17:19:09,596 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1734110349217.4ae54d53dadb6f7d5f47a16f2daa01e3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T17:19:09,597 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 4ae54d53dadb6f7d5f47a16f2daa01e3 2024-12-13T17:19:09,597 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 4ae54d53dadb6f7d5f47a16f2daa01e3 2024-12-13T17:19:09,599 INFO [StoreOpener-4ae54d53dadb6f7d5f47a16f2daa01e3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 4ae54d53dadb6f7d5f47a16f2daa01e3 2024-12-13T17:19:09,602 INFO [StoreOpener-4ae54d53dadb6f7d5f47a16f2daa01e3-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4ae54d53dadb6f7d5f47a16f2daa01e3 columnFamilyName info 2024-12-13T17:19:09,602 DEBUG [StoreOpener-4ae54d53dadb6f7d5f47a16f2daa01e3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T17:19:09,602 INFO [StoreOpener-4ae54d53dadb6f7d5f47a16f2daa01e3-1 {}] regionserver.HStore(327): Store=4ae54d53dadb6f7d5f47a16f2daa01e3/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T17:19:09,604 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ae54d53dadb6f7d5f47a16f2daa01e3 2024-12-13T17:19:09,605 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ae54d53dadb6f7d5f47a16f2daa01e3 2024-12-13T17:19:09,608 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 4ae54d53dadb6f7d5f47a16f2daa01e3 2024-12-13T17:19:09,611 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ae54d53dadb6f7d5f47a16f2daa01e3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T17:19:09,612 INFO [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 4ae54d53dadb6f7d5f47a16f2daa01e3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=793646, jitterRate=0.009173423051834106}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T17:19:09,614 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 4ae54d53dadb6f7d5f47a16f2daa01e3: 2024-12-13T17:19:09,615 INFO [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1734110349217.4ae54d53dadb6f7d5f47a16f2daa01e3., pid=11, masterSystemTime=1734110349585 2024-12-13T17:19:09,617 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1734110349217.4ae54d53dadb6f7d5f47a16f2daa01e3. 2024-12-13T17:19:09,618 INFO [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1734110349217.4ae54d53dadb6f7d5f47a16f2daa01e3. 
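
The "Opened 4ae54d53dadb6f7d5f47a16f2daa01e3; next sequenceid=2" line also reports the effective split policy: ConstantSizeRegionSplitPolicy has applied its random jitter to the table's MAX_FILESIZE, 786432 × (1 + 0.009173423...) ≈ 793646, which is exactly the desiredMaxFileSize shown. A two-line check with the values copied from the log:

    long maxFileSize = 786432L;                                     // MAX_FILESIZE from the table descriptor
    double jitterRate = 0.009173423051834106;                       // jitterRate reported in the line above
    System.out.println((long) (maxFileSize * (1.0 + jitterRate)));  // prints 793646
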
2024-12-13T17:19:09,618 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=4ae54d53dadb6f7d5f47a16f2daa01e3, regionState=OPEN, openSeqNum=2, regionLocation=bcc0a2319104,43669,1734110347446 2024-12-13T17:19:09,623 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-13T17:19:09,624 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 4ae54d53dadb6f7d5f47a16f2daa01e3, server=bcc0a2319104,43669,1734110347446 in 191 msec 2024-12-13T17:19:09,626 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-13T17:19:09,626 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=4ae54d53dadb6f7d5f47a16f2daa01e3, ASSIGN in 354 msec 2024-12-13T17:19:09,627 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T17:19:09,627 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734110349627"}]},"ts":"1734110349627"} 2024-12-13T17:19:09,629 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-12-13T17:19:09,696 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T17:19:09,699 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 479 msec 2024-12-13T17:19:10,095 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:19:10,099 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:19:10,614 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-13T17:19:10,618 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:19:10,639 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:19:13,933 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-13T17:19:13,933 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-13T17:19:13,934 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-12-13T17:19:14,019 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-13T17:19:14,019 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-12-13T17:19:14,021 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-13T17:19:19,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41247 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-13T17:19:19,227 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath, procId: 9 completed 2024-12-13T17:19:19,233 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-12-13T17:19:19,234 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1734110349217.4ae54d53dadb6f7d5f47a16f2daa01e3. 2024-12-13T17:19:19,244 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-13T17:19:19,246 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:19:19,267 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:19:19,277 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T17:19:19,280 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-13T17:19:19,281 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-13T17:19:19,281 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-13T17:19:19,281 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-13T17:19:19,281 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5280103b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/hadoop.log.dir/,AVAILABLE} 2024-12-13T17:19:19,282 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65d69bd5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-13T17:19:19,373 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@310c7b0{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/java.io.tmpdir/jetty-localhost-34803-hadoop-hdfs-3_4_1-tests_jar-_-any-4365294815036611452/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-13T17:19:19,373 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@64cbae84{HTTP/1.1, (http/1.1)}{localhost:34803} 2024-12-13T17:19:19,373 INFO [Time-limited test {}] server.Server(415): Started @132101ms 2024-12-13T17:19:19,375 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-13T17:19:19,403 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T17:19:19,406 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-13T17:19:19,407 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-13T17:19:19,407 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-13T17:19:19,407 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-13T17:19:19,407 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@464ee75e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/hadoop.log.dir/,AVAILABLE} 2024-12-13T17:19:19,408 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@200c1294{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-13T17:19:19,498 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@43ee2e92{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/java.io.tmpdir/jetty-localhost-46137-hadoop-hdfs-3_4_1-tests_jar-_-any-13774856872549651848/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-13T17:19:19,499 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1e16b6dc{HTTP/1.1, (http/1.1)}{localhost:46137} 2024-12-13T17:19:19,499 INFO [Time-limited test {}] server.Server(415): Started @132226ms 2024-12-13T17:19:19,500 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-13T17:19:19,526 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T17:19:19,529 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-13T17:19:19,530 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-13T17:19:19,530 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-13T17:19:19,530 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-13T17:19:19,530 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c0b5044{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/hadoop.log.dir/,AVAILABLE} 2024-12-13T17:19:19,530 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60af4a2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-13T17:19:19,622 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5cf54014{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/java.io.tmpdir/jetty-localhost-34295-hadoop-hdfs-3_4_1-tests_jar-_-any-7503455798498518077/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-13T17:19:19,622 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5379ca01{HTTP/1.1, (http/1.1)}{localhost:34295} 2024-12-13T17:19:19,622 INFO [Time-limited test {}] server.Server(415): Started @132350ms 2024-12-13T17:19:19,624 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-13T17:19:20,200 WARN [Thread-672 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data5/current/BP-2145113159-172.17.0.2-1734110345678/current, will proceed with Du for space computation calculation, 2024-12-13T17:19:20,200 WARN [Thread-673 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data6/current/BP-2145113159-172.17.0.2-1734110345678/current, will proceed with Du for space computation calculation, 2024-12-13T17:19:20,224 WARN [Thread-614 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-13T17:19:20,227 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x84b4e6dd58505abb with lease ID 0xe8161f9f6bfb5278: Processing first storage report for DS-dabb14ee-4400-4e52-95df-8dd261c84837 from datanode DatanodeRegistration(127.0.0.1:43667, datanodeUuid=61dfa4d7-b9d9-4b14-9600-d429686ff6c1, infoPort=46853, infoSecurePort=0, ipcPort=39257, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678) 2024-12-13T17:19:20,227 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x84b4e6dd58505abb with lease ID 0xe8161f9f6bfb5278: from storage DS-dabb14ee-4400-4e52-95df-8dd261c84837 node DatanodeRegistration(127.0.0.1:43667, datanodeUuid=61dfa4d7-b9d9-4b14-9600-d429686ff6c1, infoPort=46853, infoSecurePort=0, ipcPort=39257, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-13T17:19:20,227 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x84b4e6dd58505abb with lease ID 0xe8161f9f6bfb5278: Processing first storage report for DS-df312f11-8be4-4513-bda3-2dfb36fed127 from datanode DatanodeRegistration(127.0.0.1:43667, datanodeUuid=61dfa4d7-b9d9-4b14-9600-d429686ff6c1, infoPort=46853, infoSecurePort=0, ipcPort=39257, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678) 2024-12-13T17:19:20,227 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x84b4e6dd58505abb with lease ID 0xe8161f9f6bfb5278: from storage DS-df312f11-8be4-4513-bda3-2dfb36fed127 node DatanodeRegistration(127.0.0.1:43667, datanodeUuid=61dfa4d7-b9d9-4b14-9600-d429686ff6c1, infoPort=46853, infoSecurePort=0, ipcPort=39257, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-13T17:19:20,358 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data7/current/BP-2145113159-172.17.0.2-1734110345678/current, will proceed with Du for space computation calculation, 2024-12-13T17:19:20,358 WARN [Thread-685 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data8/current/BP-2145113159-172.17.0.2-1734110345678/current, will proceed with Du for space computation calculation, 2024-12-13T17:19:20,375 WARN [Thread-636 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-13T17:19:20,379 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8b56820a5e3d6d13 with lease ID 0xe8161f9f6bfb5279: Processing first storage report for DS-41432c6c-e811-4832-8cf1-bbe828893aee from datanode DatanodeRegistration(127.0.0.1:43595, datanodeUuid=80599a66-0713-47da-bc60-39cdcf7785c8, infoPort=33259, infoSecurePort=0, ipcPort=34755, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678) 2024-12-13T17:19:20,379 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8b56820a5e3d6d13 with lease ID 0xe8161f9f6bfb5279: from storage DS-41432c6c-e811-4832-8cf1-bbe828893aee node DatanodeRegistration(127.0.0.1:43595, datanodeUuid=80599a66-0713-47da-bc60-39cdcf7785c8, infoPort=33259, infoSecurePort=0, ipcPort=34755, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-13T17:19:20,379 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8b56820a5e3d6d13 with lease ID 0xe8161f9f6bfb5279: Processing first storage report for DS-6a42a0cb-727f-485b-8c56-0e407e4291bb from datanode DatanodeRegistration(127.0.0.1:43595, datanodeUuid=80599a66-0713-47da-bc60-39cdcf7785c8, infoPort=33259, infoSecurePort=0, ipcPort=34755, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678) 2024-12-13T17:19:20,379 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8b56820a5e3d6d13 with lease ID 0xe8161f9f6bfb5279: from storage DS-6a42a0cb-727f-485b-8c56-0e407e4291bb node DatanodeRegistration(127.0.0.1:43595, datanodeUuid=80599a66-0713-47da-bc60-39cdcf7785c8, infoPort=33259, infoSecurePort=0, ipcPort=34755, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-13T17:19:20,441 WARN [Thread-695 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data9/current/BP-2145113159-172.17.0.2-1734110345678/current, will proceed with Du for space computation calculation, 2024-12-13T17:19:20,441 WARN [Thread-696 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data10/current/BP-2145113159-172.17.0.2-1734110345678/current, will proceed with Du for space computation calculation, 2024-12-13T17:19:20,456 WARN [Thread-658 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-13T17:19:20,459 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe3be52d6d9cb3bdd with lease ID 0xe8161f9f6bfb527a: Processing first storage report for DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f from datanode DatanodeRegistration(127.0.0.1:37023, datanodeUuid=249bf48f-06d9-42e4-a54b-9ee721721203, infoPort=43331, infoSecurePort=0, ipcPort=44553, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678) 2024-12-13T17:19:20,459 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe3be52d6d9cb3bdd with lease ID 0xe8161f9f6bfb527a: from storage DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f node DatanodeRegistration(127.0.0.1:37023, datanodeUuid=249bf48f-06d9-42e4-a54b-9ee721721203, infoPort=43331, infoSecurePort=0, ipcPort=44553, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-13T17:19:20,459 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe3be52d6d9cb3bdd with lease ID 0xe8161f9f6bfb527a: Processing first storage report for DS-41d81d35-8d61-4b73-b5ef-83413c0d0508 from datanode DatanodeRegistration(127.0.0.1:37023, datanodeUuid=249bf48f-06d9-42e4-a54b-9ee721721203, infoPort=43331, infoSecurePort=0, ipcPort=44553, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678) 2024-12-13T17:19:20,459 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe3be52d6d9cb3bdd with lease ID 0xe8161f9f6bfb527a: from storage DS-41d81d35-8d61-4b73-b5ef-83413c0d0508 node DatanodeRegistration(127.0.0.1:37023, datanodeUuid=249bf48f-06d9-42e4-a54b-9ee721721203, infoPort=43331, infoSecurePort=0, ipcPort=44553, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-13T17:19:20,557 WARN [ResponseProcessor for block BP-2145113159-172.17.0.2-1734110345678:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2145113159-172.17.0.2-1734110345678:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-2145113159-172.17.0.2-1734110345678:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:33729,DS-ebf2f61b-23ad-4d5d-98d1-2d343a7ed7eb,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-13T17:19:20,556 WARN [ResponseProcessor for block BP-2145113159-172.17.0.2-1734110345678:blk_1073741839_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2145113159-172.17.0.2-1734110345678:blk_1073741839_1015 java.io.IOException: Bad response ERROR for BP-2145113159-172.17.0.2-1734110345678:blk_1073741839_1015 from datanode DatanodeInfoWithStorage[127.0.0.1:33729,DS-ebf2f61b-23ad-4d5d-98d1-2d343a7ed7eb,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-13T17:19:20,556 WARN [ResponseProcessor for block BP-2145113159-172.17.0.2-1734110345678:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2145113159-172.17.0.2-1734110345678:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-13T17:19:20,556 WARN [ResponseProcessor for block BP-2145113159-172.17.0.2-1734110345678:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2145113159-172.17.0.2-1734110345678:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-2145113159-172.17.0.2-1734110345678:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:33729,DS-ebf2f61b-23ad-4d5d-98d1-2d343a7ed7eb,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-13T17:19:20,562 WARN [DataStreamer for file /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 block BP-2145113159-172.17.0.2-1734110345678:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2145113159-172.17.0.2-1734110345678:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33729,DS-ebf2f61b-23ad-4d5d-98d1-2d343a7ed7eb,DISK], DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33729,DS-ebf2f61b-23ad-4d5d-98d1-2d343a7ed7eb,DISK]) is bad. 2024-12-13T17:19:20,562 WARN [DataStreamer for file /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta block BP-2145113159-172.17.0.2-1734110345678:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2145113159-172.17.0.2-1734110345678:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK], DatanodeInfoWithStorage[127.0.0.1:33729,DS-ebf2f61b-23ad-4d5d-98d1-2d343a7ed7eb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33729,DS-ebf2f61b-23ad-4d5d-98d1-2d343a7ed7eb,DISK]) is bad. 2024-12-13T17:19:20,562 WARN [DataStreamer for file /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,42631,1734110349102/bcc0a2319104%2C42631%2C1734110349102.1734110349319 block BP-2145113159-172.17.0.2-1734110345678:blk_1073741839_1015 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2145113159-172.17.0.2-1734110345678:blk_1073741839_1015 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK], DatanodeInfoWithStorage[127.0.0.1:33729,DS-ebf2f61b-23ad-4d5d-98d1-2d343a7ed7eb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33729,DS-ebf2f61b-23ad-4d5d-98d1-2d343a7ed7eb,DISK]) is bad. 2024-12-13T17:19:20,563 WARN [DataStreamer for file /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 block BP-2145113159-172.17.0.2-1734110345678:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2145113159-172.17.0.2-1734110345678:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK], DatanodeInfoWithStorage[127.0.0.1:33729,DS-ebf2f61b-23ad-4d5d-98d1-2d343a7ed7eb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33729,DS-ebf2f61b-23ad-4d5d-98d1-2d343a7ed7eb,DISK]) is bad. 
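
The four "Error Recovery ... is bad" lines are the HDFS client reacting to the lost datanode: each DataStreamer drops 127.0.0.1:33729 from its write pipeline and continues on the surviving replica. How aggressively a client rebuilds a shrunken pipeline is governed by standard HDFS client settings; a sketch of the relevant knobs follows, with illustrative values rather than whatever this test actually sets:

    import org.apache.hadoop.conf.Configuration;

    Configuration conf = new Configuration();
    // Allow the writer to survive a dying pipeline node by swapping in a replacement datanode.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // Best-effort: keep writing even if no replacement can be found (useful on small test clusters).
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
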
2024-12-13T17:19:20,562 WARN [PacketResponder: BP-2145113159-172.17.0.2-1734110345678:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33729] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:20,562 WARN [PacketResponder: BP-2145113159-172.17.0.2-1734110345678:blk_1073741839_1015, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33729] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:20,562 WARN [PacketResponder: BP-2145113159-172.17.0.2-1734110345678:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33729] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:20,563 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1216703933_22 at /127.0.0.1:55106 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33729:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55106 dst: /127.0.0.1:33729 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:20,564 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:36410 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43399:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36410 dst: /127.0.0.1:43399 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:20,564 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:36414 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:43399:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36414 dst: /127.0.0.1:43399 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:20,564 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1073165494_22 at /127.0.0.1:36444 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741839_1015] {}] datanode.DataXceiver(331): 127.0.0.1:43399:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36444 dst: /127.0.0.1:43399 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:20,565 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:55148 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33729:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55148 dst: /127.0.0.1:33729 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:20,565 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:55152 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33729:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55152 dst: /127.0.0.1:33729 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:20,565 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1073165494_22 at /127.0.0.1:55208 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741839_1015] {}] datanode.DataXceiver(331): 127.0.0.1:33729:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55208 dst: /127.0.0.1:33729 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:20,565 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1216703933_22 at /127.0.0.1:36380 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:43399:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36380 dst: /127.0.0.1:43399 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T17:19:20,588 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@25ddc243{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-13T17:19:20,589 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@537a7cfb{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-13T17:19:20,589 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-13T17:19:20,589 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6b4f6ace{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-13T17:19:20,589 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4d6430de{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/hadoop.log.dir/,STOPPED}
2024-12-13T17:19:20,590 WARN [BP-2145113159-172.17.0.2-1734110345678 heartbeating to localhost/127.0.0.1:34581 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-13T17:19:20,590 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-13T17:19:20,590 WARN [BP-2145113159-172.17.0.2-1734110345678 heartbeating to localhost/127.0.0.1:34581 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2145113159-172.17.0.2-1734110345678 (Datanode Uuid 8217e5cd-a926-4627-9cc3-4b6ed64b2cf6) service to localhost/127.0.0.1:34581
2024-12-13T17:19:20,590 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-13T17:19:20,591 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data3/current/BP-2145113159-172.17.0.2-1734110345678 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T17:19:20,591 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data4/current/BP-2145113159-172.17.0.2-1734110345678 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T17:19:20,591 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-13T17:19:20,591 WARN [ResponseProcessor for block BP-2145113159-172.17.0.2-1734110345678:blk_1073741830_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2145113159-172.17.0.2-1734110345678:blk_1073741830_1016
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:19:20,591 WARN [ResponseProcessor for block BP-2145113159-172.17.0.2-1734110345678:blk_1073741834_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2145113159-172.17.0.2-1734110345678:blk_1073741834_1018
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:19:20,591 WARN [ResponseProcessor for block BP-2145113159-172.17.0.2-1734110345678:blk_1073741833_1017 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2145113159-172.17.0.2-1734110345678:blk_1073741833_1017
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:19:20,592 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1216703933_22 at /127.0.0.1:36168 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:43399:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36168 dst: /127.0.0.1:43399
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T17:19:20,594 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@eb34e8b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-13T17:19:20,594 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3fe50e48{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-13T17:19:20,594 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-13T17:19:20,595 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d57dd6e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-13T17:19:20,595 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68946043{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/hadoop.log.dir/,STOPPED}
2024-12-13T17:19:20,595 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:36176 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:43399:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36176 dst: /127.0.0.1:43399
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T17:19:20,597 WARN [BP-2145113159-172.17.0.2-1734110345678 heartbeating to localhost/127.0.0.1:34581 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-13T17:19:20,595 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:36166 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43399:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36166 dst: /127.0.0.1:43399
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T17:19:20,597 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-13T17:19:20,597 WARN [BP-2145113159-172.17.0.2-1734110345678 heartbeating to localhost/127.0.0.1:34581 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2145113159-172.17.0.2-1734110345678 (Datanode Uuid 399ff96b-0b78-4ee6-9e59-a9b2b34e2e47) service to localhost/127.0.0.1:34581
2024-12-13T17:19:20,596 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1073165494_22 at /127.0.0.1:36180 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741839_1015] {}] datanode.DataXceiver(331): 127.0.0.1:43399:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36180 dst: /127.0.0.1:43399
java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[closed]. Total timeout mills is 60000, 59978 millis timeout left.
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T17:19:20,597 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-13T17:19:20,598 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data1/current/BP-2145113159-172.17.0.2-1734110345678 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T17:19:20,598 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data2/current/BP-2145113159-172.17.0.2-1734110345678 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T17:19:20,598 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-13T17:19:20,599 WARN [ResponseProcessor for block BP-2145113159-172.17.0.2-1734110345678:blk_1073741839_1019 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2145113159-172.17.0.2-1734110345678:blk_1073741839_1019
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:19:20,602 WARN [RS:0;bcc0a2319104:43669.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=4, requesting roll of WAL
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:19:20,602 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog bcc0a2319104%2C43669%2C1734110347446:(num 1734110348078) roll requested
2024-12-13T17:19:20,602 INFO [regionserver/bcc0a2319104:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C43669%2C1734110347446.1734110360602
2024-12-13T17:19:20,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43669 {}] ipc.MetricsHBaseServer(152): Unknown exception type
org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=4, requesting roll of WAL
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?]
    at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?]
    at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:19:20,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43669 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:47126 deadline: 1734110370601, exception=org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=4, requesting roll of WAL
2024-12-13T17:19:20,609 WARN [regionserver/bcc0a2319104:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WAL
org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=4, requesting roll of WAL
2024-12-13T17:19:20,609 INFO [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 with entries=4, filesize=959 B; new WAL /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110360602
2024-12-13T17:19:20,612 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33259:33259),(127.0.0.1/127.0.0.1:43331:43331)]
2024-12-13T17:19:20,612 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 is not closed yet, will try archiving it next time
2024-12-13T17:19:20,612 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:19:20,613 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:19:20,613 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils
2024-12-13T17:19:20,614 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease()
2024-12-13T17:19:20,614 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:19:20,616 WARN [IPC Server handler 2 on default port 34581 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 has not been closed. Lease recovery is in progress. RecoveryId = 1021 for block blk_1073741833_1017
2024-12-13T17:19:20,617 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 after 3ms
2024-12-13T17:19:24,620 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 after 4006ms
2024-12-13T17:19:32,707 INFO [Time-limited test {}] wal.TestLogRolling(243): log.getCurrentFileName(): hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110360602
2024-12-13T17:19:32,709 WARN [ResponseProcessor for block BP-2145113159-172.17.0.2-1734110345678:blk_1073741840_1020 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2145113159-172.17.0.2-1734110345678:blk_1073741840_1020
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:19:32,710 WARN [DataStreamer for file /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110360602 block BP-2145113159-172.17.0.2-1734110345678:blk_1073741840_1020 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2145113159-172.17.0.2-1734110345678:blk_1073741840_1020 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43595,DS-41432c6c-e811-4832-8cf1-bbe828893aee,DISK], DatanodeInfoWithStorage[127.0.0.1:37023,DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43595,DS-41432c6c-e811-4832-8cf1-bbe828893aee,DISK]) is bad.
2024-12-13T17:19:32,711 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:57834 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741840_1020] {}] datanode.DataXceiver(331): 127.0.0.1:43595:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57834 dst: /127.0.0.1:43595
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T17:19:32,711 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:48034 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741840_1020] {}] datanode.DataXceiver(331): 127.0.0.1:37023:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48034 dst: /127.0.0.1:37023
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T17:19:32,735 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@43ee2e92{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-13T17:19:32,736 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1e16b6dc{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-13T17:19:32,736 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-13T17:19:32,737 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@200c1294{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-13T17:19:32,737 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@464ee75e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/hadoop.log.dir/,STOPPED}
2024-12-13T17:19:32,740 WARN [BP-2145113159-172.17.0.2-1734110345678 heartbeating to localhost/127.0.0.1:34581 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-13T17:19:32,740 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-13T17:19:32,740 WARN [BP-2145113159-172.17.0.2-1734110345678 heartbeating to localhost/127.0.0.1:34581 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2145113159-172.17.0.2-1734110345678 (Datanode Uuid 80599a66-0713-47da-bc60-39cdcf7785c8) service to localhost/127.0.0.1:34581
2024-12-13T17:19:32,740 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-13T17:19:32,741 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data7/current/BP-2145113159-172.17.0.2-1734110345678 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T17:19:32,741 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data8/current/BP-2145113159-172.17.0.2-1734110345678 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T17:19:32,742 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-13T17:19:32,745 WARN [sync.1 {}] wal.FSHLog(750): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37023,DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f,DISK]]
2024-12-13T17:19:32,746 WARN [sync.1 {}] wal.FSHLog(721): Requesting log roll because of low replication, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37023,DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f,DISK]]
2024-12-13T17:19:32,746 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog bcc0a2319104%2C43669%2C1734110347446:(num 1734110360602) roll requested
2024-12-13T17:19:32,746 INFO [regionserver/bcc0a2319104:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C43669%2C1734110347446.1734110372746
2024-12-13T17:19:32,749 WARN [Thread-727 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:19:32,750 WARN [Thread-727 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2145113159-172.17.0.2-1734110345678:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43595,DS-41432c6c-e811-4832-8cf1-bbe828893aee,DISK], DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43595,DS-41432c6c-e811-4832-8cf1-bbe828893aee,DISK]) is bad.
2024-12-13T17:19:32,750 WARN [Thread-727 {}] hdfs.DataStreamer(1850): Abandoning BP-2145113159-172.17.0.2-1734110345678:blk_1073741841_1023
2024-12-13T17:19:32,753 WARN [Thread-727 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43595,DS-41432c6c-e811-4832-8cf1-bbe828893aee,DISK]
2024-12-13T17:19:32,758 WARN [Thread-727 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1024
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43399
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:19:32,757 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:55098 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741842_1024] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data6]'}, localName='127.0.0.1:43667', datanodeUuid='61dfa4d7-b9d9-4b14-9600-d429686ff6c1', xmitsInProgress=0}:Exception transferring block BP-2145113159-172.17.0.2-1734110345678:blk_1073741842_1024 to mirror 127.0.0.1:43399
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T17:19:32,758 WARN [Thread-727 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2145113159-172.17.0.2-1734110345678:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43667,DS-dabb14ee-4400-4e52-95df-8dd261c84837,DISK], DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK]) is bad.
2024-12-13T17:19:32,758 WARN [Thread-727 {}] hdfs.DataStreamer(1850): Abandoning BP-2145113159-172.17.0.2-1734110345678:blk_1073741842_1024
2024-12-13T17:19:32,758 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:55098 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741842_1024] {}] datanode.BlockReceiver(316): Block 1073741842 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-12-13T17:19:32,758 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:55098 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:43667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55098 dst: /127.0.0.1:43667
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T17:19:32,759 WARN [Thread-727 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK]
2024-12-13T17:19:32,760 WARN [Thread-727 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1025
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:19:32,760 WARN [Thread-727 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2145113159-172.17.0.2-1734110345678:blk_1073741843_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33729,DS-ebf2f61b-23ad-4d5d-98d1-2d343a7ed7eb,DISK], DatanodeInfoWithStorage[127.0.0.1:37023,DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33729,DS-ebf2f61b-23ad-4d5d-98d1-2d343a7ed7eb,DISK]) is bad.
2024-12-13T17:19:32,760 WARN [Thread-727 {}] hdfs.DataStreamer(1850): Abandoning BP-2145113159-172.17.0.2-1734110345678:blk_1073741843_1025
2024-12-13T17:19:32,761 WARN [Thread-727 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33729,DS-ebf2f61b-23ad-4d5d-98d1-2d343a7ed7eb,DISK]
2024-12-13T17:19:32,767 INFO [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110360602 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110372746
2024-12-13T17:19:32,767 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43331:43331),(127.0.0.1/127.0.0.1:46853:46853)]
2024-12-13T17:19:32,767 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 is not closed yet, will try archiving it next time
2024-12-13T17:19:32,767 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110360602 is not closed yet, will try archiving it next time
2024-12-13T17:19:32,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37023 is added to blk_1073741840_1022 (size=2431)
2024-12-13T17:19:33,172 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(751): hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 is not closed yet, will try archiving it next time
2024-12-13T17:19:33,476 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6c19ab81[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37023, datanodeUuid=249bf48f-06d9-42e4-a54b-9ee721721203, infoPort=43331, infoSecurePort=0, ipcPort=44553, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678):Failed to transfer BP-2145113159-172.17.0.2-1734110345678:blk_1073741840_1022 to 127.0.0.1:43595 got
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T17:19:36,753 WARN [ResponseProcessor for block BP-2145113159-172.17.0.2-1734110345678:blk_1073741844_1026 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2145113159-172.17.0.2-1734110345678:blk_1073741844_1026
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:19:36,754 WARN [DataStreamer for file /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110372746 block BP-2145113159-172.17.0.2-1734110345678:blk_1073741844_1026 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2145113159-172.17.0.2-1734110345678:blk_1073741844_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37023,DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f,DISK], DatanodeInfoWithStorage[127.0.0.1:43667,DS-dabb14ee-4400-4e52-95df-8dd261c84837,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37023,DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f,DISK]) is bad.
2024-12-13T17:19:36,755 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:51082 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741844_1026] {}] datanode.DataXceiver(331): 127.0.0.1:37023:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51082 dst: /127.0.0.1:37023
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T17:19:36,756 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:55104 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741844_1026] {}] datanode.DataXceiver(331): 127.0.0.1:43667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55104 dst: /127.0.0.1:43667
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T17:19:36,785 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5cf54014{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-13T17:19:36,786 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5379ca01{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-13T17:19:36,786 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-13T17:19:36,787 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60af4a2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-13T17:19:36,787 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c0b5044{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/hadoop.log.dir/,STOPPED}
2024-12-13T17:19:36,789 WARN [BP-2145113159-172.17.0.2-1734110345678 heartbeating to localhost/127.0.0.1:34581 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-13T17:19:36,789 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-13T17:19:36,789 WARN [BP-2145113159-172.17.0.2-1734110345678 heartbeating to localhost/127.0.0.1:34581 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2145113159-172.17.0.2-1734110345678 (Datanode Uuid 249bf48f-06d9-42e4-a54b-9ee721721203) service to localhost/127.0.0.1:34581
2024-12-13T17:19:36,790 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-13T17:19:36,790 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data9/current/BP-2145113159-172.17.0.2-1734110345678 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T17:19:36,791 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data10/current/BP-2145113159-172.17.0.2-1734110345678 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T17:19:36,791 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-13T17:19:36,794 WARN [sync.4 {}] wal.FSHLog(750): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43667,DS-dabb14ee-4400-4e52-95df-8dd261c84837,DISK]]
2024-12-13T17:19:36,794 WARN [sync.4 {}] wal.FSHLog(721): Requesting log roll because of low replication, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43667,DS-dabb14ee-4400-4e52-95df-8dd261c84837,DISK]]
2024-12-13T17:19:36,795 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog bcc0a2319104%2C43669%2C1734110347446:(num 1734110372746) roll requested
2024-12-13T17:19:36,795 INFO [regionserver/bcc0a2319104:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C43669%2C1734110347446.1734110376795
2024-12-13T17:19:36,798 WARN [Thread-739 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:19:36,799 WARN [Thread-739 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2145113159-172.17.0.2-1734110345678:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37023,DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f,DISK], DatanodeInfoWithStorage[127.0.0.1:33729,DS-ebf2f61b-23ad-4d5d-98d1-2d343a7ed7eb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37023,DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f,DISK]) is bad.
2024-12-13T17:19:36,799 WARN [Thread-739 {}] hdfs.DataStreamer(1850): Abandoning BP-2145113159-172.17.0.2-1734110345678:blk_1073741845_1028
2024-12-13T17:19:36,800 WARN [Thread-739 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37023,DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f,DISK]
2024-12-13T17:19:36,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43669 {}] regionserver.HRegion(8581): Flush requested on 4ae54d53dadb6f7d5f47a16f2daa01e3
2024-12-13T17:19:36,801 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4ae54d53dadb6f7d5f47a16f2daa01e3 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-13T17:19:36,803 WARN [Thread-739 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43595
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-13T17:19:36,803 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:55120 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data6]'}, localName='127.0.0.1:43667', datanodeUuid='61dfa4d7-b9d9-4b14-9600-d429686ff6c1', xmitsInProgress=0}:Exception transferring block BP-2145113159-172.17.0.2-1734110345678:blk_1073741846_1029 to mirror 127.0.0.1:43595 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:36,803 WARN [Thread-739 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2145113159-172.17.0.2-1734110345678:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43667,DS-dabb14ee-4400-4e52-95df-8dd261c84837,DISK], DatanodeInfoWithStorage[127.0.0.1:43595,DS-41432c6c-e811-4832-8cf1-bbe828893aee,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43595,DS-41432c6c-e811-4832-8cf1-bbe828893aee,DISK]) is bad. 2024-12-13T17:19:36,803 WARN [Thread-739 {}] hdfs.DataStreamer(1850): Abandoning BP-2145113159-172.17.0.2-1734110345678:blk_1073741846_1029 2024-12-13T17:19:36,803 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:55120 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-13T17:19:36,804 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:55120 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:43667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55120 dst: /127.0.0.1:43667 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:36,804 WARN [Thread-739 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43595,DS-41432c6c-e811-4832-8cf1-bbe828893aee,DISK] 2024-12-13T17:19:36,807 WARN [Thread-739 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33729 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-13T17:19:36,807 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:55124 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741847_1030] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data6]'}, localName='127.0.0.1:43667', datanodeUuid='61dfa4d7-b9d9-4b14-9600-d429686ff6c1', xmitsInProgress=0}:Exception transferring block BP-2145113159-172.17.0.2-1734110345678:blk_1073741847_1030 to mirror 127.0.0.1:33729 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:36,807 WARN [Thread-739 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2145113159-172.17.0.2-1734110345678:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43667,DS-dabb14ee-4400-4e52-95df-8dd261c84837,DISK], DatanodeInfoWithStorage[127.0.0.1:33729,DS-ebf2f61b-23ad-4d5d-98d1-2d343a7ed7eb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33729,DS-ebf2f61b-23ad-4d5d-98d1-2d343a7ed7eb,DISK]) is bad. 2024-12-13T17:19:36,808 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:55124 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741847_1030] {}] datanode.BlockReceiver(316): Block 1073741847 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-13T17:19:36,808 WARN [Thread-739 {}] hdfs.DataStreamer(1850): Abandoning BP-2145113159-172.17.0.2-1734110345678:blk_1073741847_1030 2024-12-13T17:19:36,808 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:55124 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741847_1030] {}] datanode.DataXceiver(331): 127.0.0.1:43667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55124 dst: /127.0.0.1:43667 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:36,809 WARN [Thread-739 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33729,DS-ebf2f61b-23ad-4d5d-98d1-2d343a7ed7eb,DISK] 2024-12-13T17:19:36,812 WARN [Thread-739 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43399 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-13T17:19:36,812 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:55128 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741848_1031] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data6]'}, localName='127.0.0.1:43667', datanodeUuid='61dfa4d7-b9d9-4b14-9600-d429686ff6c1', xmitsInProgress=0}:Exception transferring block BP-2145113159-172.17.0.2-1734110345678:blk_1073741848_1031 to mirror 127.0.0.1:43399 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:36,812 WARN [Thread-739 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2145113159-172.17.0.2-1734110345678:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43667,DS-dabb14ee-4400-4e52-95df-8dd261c84837,DISK], DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK]) is bad. 2024-12-13T17:19:36,812 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:55128 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741848_1031] {}] datanode.BlockReceiver(316): Block 1073741848 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-13T17:19:36,812 WARN [Thread-739 {}] hdfs.DataStreamer(1850): Abandoning BP-2145113159-172.17.0.2-1734110345678:blk_1073741848_1031 2024-12-13T17:19:36,812 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:55128 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741848_1031] {}] datanode.DataXceiver(331): 127.0.0.1:43667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55128 dst: /127.0.0.1:43667 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:36,813 WARN [Thread-739 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK] 2024-12-13T17:19:36,814 WARN [IPC Server handler 1 on default port 34581 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-13T17:19:36,814 WARN [IPC Server handler 1 on default port 34581 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-13T17:19:36,815 WARN [IPC Server handler 1 on default port 34581 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-13T17:19:36,819 INFO [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110372746 with entries=13, filesize=14.10 KB; new WAL /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110376795 2024-12-13T17:19:36,819 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46853:46853)] 2024-12-13T17:19:36,819 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 is not closed yet, will try archiving it next time 2024-12-13T17:19:36,819 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110372746 is not closed yet, will try archiving it next time 
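The placement warnings above show the NameNode unable to find a replacement node (still 1 of 2 replicas) while the client has already excluded every candidate datanode. How an HDFS client reacts to a shrinking write pipeline is governed by the dfs.client.block.write.replace-datanode-on-failure.* properties; below is a sketch of a configuration that keeps writing through a shrunken pipeline rather than aborting (values are illustrative, not the settings this test uses):

import org.apache.hadoop.conf.Configuration;

// Sketch: client-side HDFS settings that control the pipeline-recovery
// behavior (exclude/abandon cycle) seen in the log above.
// Assumes hadoop-common on the classpath; values are illustrative.
public class PipelinePolicyExample {
    public static Configuration pipelineTolerantConf() {
        Configuration conf = new Configuration();
        // Keep asking the NameNode for replacement datanodes on failure...
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // ...but if no replacement can be found (as in the log), continue
        // with the shrunken pipeline instead of failing the write outright.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        return conf;
    }
}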
2024-12-13T17:19:36,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43667 is added to blk_1073741844_1027 (size=14443) 2024-12-13T17:19:36,821 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ae54d53dadb6f7d5f47a16f2daa01e3/.tmp/info/f21d58406fca4cb49d3a384809931f3f is 1080, key is row0002/info:/1734110372743/Put/seqid=0 2024-12-13T17:19:36,823 WARN [Thread-741 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-13T17:19:36,823 WARN [Thread-741 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2145113159-172.17.0.2-1734110345678:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43595,DS-41432c6c-e811-4832-8cf1-bbe828893aee,DISK], DatanodeInfoWithStorage[127.0.0.1:33729,DS-ebf2f61b-23ad-4d5d-98d1-2d343a7ed7eb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43595,DS-41432c6c-e811-4832-8cf1-bbe828893aee,DISK]) is bad. 2024-12-13T17:19:36,823 WARN [Thread-741 {}] hdfs.DataStreamer(1850): Abandoning BP-2145113159-172.17.0.2-1734110345678:blk_1073741850_1033 2024-12-13T17:19:36,824 WARN [Thread-741 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43595,DS-41432c6c-e811-4832-8cf1-bbe828893aee,DISK] 2024-12-13T17:19:36,825 WARN [Thread-741 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-13T17:19:36,825 WARN [Thread-741 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2145113159-172.17.0.2-1734110345678:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33729,DS-ebf2f61b-23ad-4d5d-98d1-2d343a7ed7eb,DISK], DatanodeInfoWithStorage[127.0.0.1:37023,DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33729,DS-ebf2f61b-23ad-4d5d-98d1-2d343a7ed7eb,DISK]) is bad. 2024-12-13T17:19:36,825 WARN [Thread-741 {}] hdfs.DataStreamer(1850): Abandoning BP-2145113159-172.17.0.2-1734110345678:blk_1073741851_1034 2024-12-13T17:19:36,825 WARN [Thread-741 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33729,DS-ebf2f61b-23ad-4d5d-98d1-2d343a7ed7eb,DISK] 2024-12-13T17:19:36,826 WARN [Thread-741 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-13T17:19:36,827 WARN [Thread-741 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2145113159-172.17.0.2-1734110345678:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37023,DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f,DISK], DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37023,DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f,DISK]) is bad. 2024-12-13T17:19:36,827 WARN [Thread-741 {}] hdfs.DataStreamer(1850): Abandoning BP-2145113159-172.17.0.2-1734110345678:blk_1073741852_1035 2024-12-13T17:19:36,827 WARN [Thread-741 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37023,DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f,DISK] 2024-12-13T17:19:36,828 WARN [Thread-741 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-13T17:19:36,828 WARN [Thread-741 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2145113159-172.17.0.2-1734110345678:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK], DatanodeInfoWithStorage[127.0.0.1:43667,DS-dabb14ee-4400-4e52-95df-8dd261c84837,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK]) is bad. 2024-12-13T17:19:36,828 WARN [Thread-741 {}] hdfs.DataStreamer(1850): Abandoning BP-2145113159-172.17.0.2-1734110345678:blk_1073741853_1036 2024-12-13T17:19:36,829 WARN [Thread-741 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK] 2024-12-13T17:19:36,829 WARN [IPC Server handler 2 on default port 34581 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-13T17:19:36,830 WARN [IPC Server handler 2 on default port 34581 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-13T17:19:36,830 WARN [IPC Server handler 2 on default port 34581 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-13T17:19:36,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43667 is added to blk_1073741854_1037 (size=10347) 2024-12-13T17:19:37,018 WARN [sync.2 {}] wal.FSHLog(750): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43667,DS-dabb14ee-4400-4e52-95df-8dd261c84837,DISK]] 2024-12-13T17:19:37,018 WARN [sync.2 {}] wal.FSHLog(721): Requesting log roll because of low replication, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43667,DS-dabb14ee-4400-4e52-95df-8dd261c84837,DISK]] 2024-12-13T17:19:37,018 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog bcc0a2319104%2C43669%2C1734110347446:(num 1734110376795) roll requested 2024-12-13T17:19:37,019 INFO [regionserver/bcc0a2319104:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C43669%2C1734110347446.1734110377019 2024-12-13T17:19:37,027 WARN [Thread-750 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43399 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-13T17:19:37,027 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:55154 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741855_1038] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data6]'}, localName='127.0.0.1:43667', datanodeUuid='61dfa4d7-b9d9-4b14-9600-d429686ff6c1', xmitsInProgress=0}:Exception transferring block BP-2145113159-172.17.0.2-1734110345678:blk_1073741855_1038 to mirror 127.0.0.1:43399 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
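Thread-739's and Thread-750's sequences above (blk_..._1028 through blk_..._1041) are the DataStreamer retry loop: allocate a block, attempt to open the pipeline, and on a ConnectException or an ERROR ack with firstBadLink, abandon the block, exclude the bad node, and request a fresh allocation. A schematic of that loop, where Allocator and connect are hypothetical stand-ins for the NameNode RPC and the socket setup done by the real client:

import java.io.IOException;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Schematic of the pattern visible in the Thread-739/Thread-750 lines:
// try a pipeline, and on failure abandon the block, exclude the bad
// datanode, and retry with a new allocation.
public class ExcludeAndRetrySketch {
    interface Allocator {
        // Returns the datanodes of a freshly allocated block (a new blk_..._...),
        // avoiding the excluded set.
        List<String> allocate(Set<String> excluded) throws IOException;
    }

    static List<String> openPipeline(Allocator nameNode, int maxRetries) throws IOException {
        Set<String> excluded = new HashSet<>();
        for (int attempt = 0; attempt < maxRetries; attempt++) {
            List<String> pipeline = nameNode.allocate(excluded);
            String badNode = connect(pipeline); // null if every node accepts
            if (badNode == null) {
                return pipeline;                // pipeline established
            }
            // Mirrors "Abandoning BP-...:blk_..." + "Excluding datanode ..."
            excluded.add(badNode);
        }
        throw new IOException("Unable to create new block: all candidate datanodes excluded");
    }

    static String connect(List<String> pipeline) {
        // Placeholder: a real client opens a socket to the first node, which
        // chains to its mirror; a ConnectException or an ERROR ack carrying
        // firstBadLink identifies which node to exclude.
        return null;
    }
}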
2024-12-13T17:19:37,027 WARN [Thread-750 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2145113159-172.17.0.2-1734110345678:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43667,DS-dabb14ee-4400-4e52-95df-8dd261c84837,DISK], DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK]) is bad. 2024-12-13T17:19:37,027 WARN [Thread-750 {}] hdfs.DataStreamer(1850): Abandoning BP-2145113159-172.17.0.2-1734110345678:blk_1073741855_1038 2024-12-13T17:19:37,027 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:55154 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741855_1038] {}] datanode.BlockReceiver(316): Block 1073741855 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-13T17:19:37,028 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:55154 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741855_1038] {}] datanode.DataXceiver(331): 127.0.0.1:43667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55154 dst: /127.0.0.1:43667 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:37,029 WARN [Thread-750 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK] 2024-12-13T17:19:37,033 WARN [Thread-750 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33729 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-13T17:19:37,033 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:55168 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741856_1039] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data6]'}, localName='127.0.0.1:43667', datanodeUuid='61dfa4d7-b9d9-4b14-9600-d429686ff6c1', xmitsInProgress=0}:Exception transferring block BP-2145113159-172.17.0.2-1734110345678:blk_1073741856_1039 to mirror 127.0.0.1:33729 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:37,033 WARN [Thread-750 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2145113159-172.17.0.2-1734110345678:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43667,DS-dabb14ee-4400-4e52-95df-8dd261c84837,DISK], DatanodeInfoWithStorage[127.0.0.1:33729,DS-ebf2f61b-23ad-4d5d-98d1-2d343a7ed7eb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33729,DS-ebf2f61b-23ad-4d5d-98d1-2d343a7ed7eb,DISK]) is bad. 2024-12-13T17:19:37,033 WARN [Thread-750 {}] hdfs.DataStreamer(1850): Abandoning BP-2145113159-172.17.0.2-1734110345678:blk_1073741856_1039 2024-12-13T17:19:37,033 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:55168 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741856_1039] {}] datanode.BlockReceiver(316): Block 1073741856 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-13T17:19:37,034 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:55168 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741856_1039] {}] datanode.DataXceiver(331): 127.0.0.1:43667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55168 dst: /127.0.0.1:43667 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:37,034 WARN [Thread-750 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33729,DS-ebf2f61b-23ad-4d5d-98d1-2d343a7ed7eb,DISK] 2024-12-13T17:19:37,036 WARN [Thread-750 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-13T17:19:37,036 WARN [Thread-750 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2145113159-172.17.0.2-1734110345678:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43595,DS-41432c6c-e811-4832-8cf1-bbe828893aee,DISK], DatanodeInfoWithStorage[127.0.0.1:43667,DS-dabb14ee-4400-4e52-95df-8dd261c84837,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43595,DS-41432c6c-e811-4832-8cf1-bbe828893aee,DISK]) is bad. 2024-12-13T17:19:37,036 WARN [Thread-750 {}] hdfs.DataStreamer(1850): Abandoning BP-2145113159-172.17.0.2-1734110345678:blk_1073741857_1040 2024-12-13T17:19:37,037 WARN [Thread-750 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43595,DS-41432c6c-e811-4832-8cf1-bbe828893aee,DISK] 2024-12-13T17:19:37,039 WARN [Thread-750 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37023 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-13T17:19:37,039 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:55174 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741858_1041] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data6]'}, localName='127.0.0.1:43667', datanodeUuid='61dfa4d7-b9d9-4b14-9600-d429686ff6c1', xmitsInProgress=0}:Exception transferring block BP-2145113159-172.17.0.2-1734110345678:blk_1073741858_1041 to mirror 127.0.0.1:37023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:37,039 WARN [Thread-750 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2145113159-172.17.0.2-1734110345678:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43667,DS-dabb14ee-4400-4e52-95df-8dd261c84837,DISK], DatanodeInfoWithStorage[127.0.0.1:37023,DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37023,DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f,DISK]) is bad. 2024-12-13T17:19:37,039 WARN [Thread-750 {}] hdfs.DataStreamer(1850): Abandoning BP-2145113159-172.17.0.2-1734110345678:blk_1073741858_1041 2024-12-13T17:19:37,039 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:55174 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741858_1041] {}] datanode.BlockReceiver(316): Block 1073741858 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-13T17:19:37,040 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:55174 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741858_1041] {}] datanode.DataXceiver(331): 127.0.0.1:43667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55174 dst: /127.0.0.1:43667 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:37,040 WARN [Thread-750 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37023,DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f,DISK] 2024-12-13T17:19:37,041 WARN [IPC Server handler 3 on default port 34581 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-13T17:19:37,041 WARN [IPC Server handler 3 on default port 34581 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-13T17:19:37,041 WARN [IPC Server handler 3 on default port 34581 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-13T17:19:37,044 INFO [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110376795 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110377019 2024-12-13T17:19:37,044 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46853:46853)] 2024-12-13T17:19:37,044 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 is not closed yet, will try archiving it next time 2024-12-13T17:19:37,044 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110372746 is not closed yet, will try archiving it next time 2024-12-13T17:19:37,045 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(751): 
hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110376795 is not closed yet, will try archiving it next time 2024-12-13T17:19:37,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43667 is added to blk_1073741849_1032 (size=1261) 2024-12-13T17:19:37,223 WARN [sync.4 {}] wal.FSHLog(760): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-12-13T17:19:37,224 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(751): hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 is not closed yet, will try archiving it next time 2024-12-13T17:19:37,224 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(751): hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110376795 is not closed yet, will try archiving it next time 2024-12-13T17:19:37,234 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ae54d53dadb6f7d5f47a16f2daa01e3/.tmp/info/f21d58406fca4cb49d3a384809931f3f 2024-12-13T17:19:37,246 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ae54d53dadb6f7d5f47a16f2daa01e3/.tmp/info/f21d58406fca4cb49d3a384809931f3f as hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ae54d53dadb6f7d5f47a16f2daa01e3/info/f21d58406fca4cb49d3a384809931f3f 2024-12-13T17:19:37,254 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ae54d53dadb6f7d5f47a16f2daa01e3/info/f21d58406fca4cb49d3a384809931f3f, entries=5, sequenceid=12, filesize=10.1 K 2024-12-13T17:19:37,255 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 4ae54d53dadb6f7d5f47a16f2daa01e3 in 454ms, sequenceid=12, compaction requested=false 2024-12-13T17:19:37,255 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4ae54d53dadb6f7d5f47a16f2daa01e3: 2024-12-13T17:19:37,278 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-13T17:19:37,437 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T17:19:37,441 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-13T17:19:37,443 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-13T17:19:37,443 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-13T17:19:37,443 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-13T17:19:37,443 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@739f1c9b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/hadoop.log.dir/,AVAILABLE} 2024-12-13T17:19:37,444 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@308286b4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-13T17:19:37,447 DEBUG [Close-WAL-Writer-2 {}] wal.AbstractFSWAL(751): hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 is not closed yet, will try archiving it next time 2024-12-13T17:19:37,447 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110360602 to hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/oldWALs/bcc0a2319104%2C43669%2C1734110347446.1734110360602 2024-12-13T17:19:37,535 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@21fd9034{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/java.io.tmpdir/jetty-localhost-40177-hadoop-hdfs-3_4_1-tests_jar-_-any-14742334765254060287/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-13T17:19:37,536 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@15b6c308{HTTP/1.1, (http/1.1)}{localhost:40177} 2024-12-13T17:19:37,536 INFO [Time-limited test {}] server.Server(415): Started @150263ms 2024-12-13T17:19:37,537 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-13T17:19:37,749 WARN [master/bcc0a2319104:0:becomeActiveMaster.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=96, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-13T17:19:37,750 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(197): WAL FSHLog bcc0a2319104%2C41247%2C1734110347301:(num 1734110347568) roll requested 2024-12-13T17:19:37,751 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C41247%2C1734110347301.1734110377750 2024-12-13T17:19:37,751 ERROR [ProcExecTimeout {}] region.RegionProcedureStore(422): Failed to delete pids=[4, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-13T17:19:37,752 ERROR [ProcExecTimeout {}] procedure2.TimeoutExecutorThread(124): Ignoring pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner exception: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL java.io.UncheckedIOException: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL at org.apache.hadoop.hbase.procedure2.store.region.RegionProcedureStore.delete(RegionProcedureStore.java:423) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner.periodicExecute(CompletedProcedureCleaner.java:135) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.executeInMemoryChore(TimeoutExecutorThread.java:122) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.execDelayedProcedure(TimeoutExecutorThread.java:101) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.run(TimeoutExecutorThread.java:68) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] Caused by: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-13T17:19:37,757 WARN [Thread-779 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-13T17:19:37,757 WARN [Thread-779 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2145113159-172.17.0.2-1734110345678:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33729,DS-ebf2f61b-23ad-4d5d-98d1-2d343a7ed7eb,DISK], DatanodeInfoWithStorage[127.0.0.1:37023,DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33729,DS-ebf2f61b-23ad-4d5d-98d1-2d343a7ed7eb,DISK]) is bad. 2024-12-13T17:19:37,757 WARN [Thread-779 {}] hdfs.DataStreamer(1850): Abandoning BP-2145113159-172.17.0.2-1734110345678:blk_1073741860_1043 2024-12-13T17:19:37,758 WARN [Thread-779 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33729,DS-ebf2f61b-23ad-4d5d-98d1-2d343a7ed7eb,DISK] 2024-12-13T17:19:37,761 WARN [Thread-779 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-13T17:19:37,761 WARN [Thread-779 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2145113159-172.17.0.2-1734110345678:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK], DatanodeInfoWithStorage[127.0.0.1:43595,DS-41432c6c-e811-4832-8cf1-bbe828893aee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK]) is bad. 2024-12-13T17:19:37,761 WARN [Thread-779 {}] hdfs.DataStreamer(1850): Abandoning BP-2145113159-172.17.0.2-1734110345678:blk_1073741861_1044 2024-12-13T17:19:37,762 WARN [Thread-779 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK] 2024-12-13T17:19:37,764 WARN [Thread-779 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37023 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-13T17:19:37,764 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1216703933_22 at /127.0.0.1:55208 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741862_1045] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data6]'}, localName='127.0.0.1:43667', datanodeUuid='61dfa4d7-b9d9-4b14-9600-d429686ff6c1', xmitsInProgress=0}:Exception transferring block BP-2145113159-172.17.0.2-1734110345678:blk_1073741862_1045 to mirror 127.0.0.1:37023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:37,765 WARN [Thread-779 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2145113159-172.17.0.2-1734110345678:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43667,DS-dabb14ee-4400-4e52-95df-8dd261c84837,DISK], DatanodeInfoWithStorage[127.0.0.1:37023,DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37023,DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f,DISK]) is bad. 2024-12-13T17:19:37,765 WARN [Thread-779 {}] hdfs.DataStreamer(1850): Abandoning BP-2145113159-172.17.0.2-1734110345678:blk_1073741862_1045 2024-12-13T17:19:37,765 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1216703933_22 at /127.0.0.1:55208 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741862_1045] {}] datanode.BlockReceiver(316): Block 1073741862 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-13T17:19:37,765 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1216703933_22 at /127.0.0.1:55208 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741862_1045] {}] datanode.DataXceiver(331): 127.0.0.1:43667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55208 dst: /127.0.0.1:43667 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:37,765 WARN [Thread-779 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37023,DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f,DISK] 2024-12-13T17:19:37,768 WARN [Thread-779 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43595 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-13T17:19:37,767 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1216703933_22 at /127.0.0.1:55214 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741863_1046] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data6]'}, localName='127.0.0.1:43667', datanodeUuid='61dfa4d7-b9d9-4b14-9600-d429686ff6c1', xmitsInProgress=0}:Exception transferring block BP-2145113159-172.17.0.2-1734110345678:blk_1073741863_1046 to mirror 127.0.0.1:43595 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T17:19:37,768 WARN [Thread-779 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2145113159-172.17.0.2-1734110345678:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43667,DS-dabb14ee-4400-4e52-95df-8dd261c84837,DISK], DatanodeInfoWithStorage[127.0.0.1:43595,DS-41432c6c-e811-4832-8cf1-bbe828893aee,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43595,DS-41432c6c-e811-4832-8cf1-bbe828893aee,DISK]) is bad. 2024-12-13T17:19:37,768 WARN [Thread-779 {}] hdfs.DataStreamer(1850): Abandoning BP-2145113159-172.17.0.2-1734110345678:blk_1073741863_1046 2024-12-13T17:19:37,768 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1216703933_22 at /127.0.0.1:55214 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741863_1046] {}] datanode.BlockReceiver(316): Block 1073741863 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-13T17:19:37,768 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1216703933_22 at /127.0.0.1:55214 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741863_1046] {}] datanode.DataXceiver(331): 127.0.0.1:43667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55214 dst: /127.0.0.1:43667 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T17:19:37,768 WARN [Thread-779 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43595,DS-41432c6c-e811-4832-8cf1-bbe828893aee,DISK] 2024-12-13T17:19:37,769 WARN [IPC Server handler 3 on default port 34581 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-13T17:19:37,769 WARN [IPC Server handler 3 on default port 34581 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-13T17:19:37,769 WARN [IPC Server handler 3 on default port 34581 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-13T17:19:37,773 WARN [master:store-WAL-Roller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL 2024-12-13T17:19:37,773 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 with entries=93, filesize=46.04 KB; new WAL /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110377750 2024-12-13T17:19:37,773 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46853:46853)] 2024-12-13T17:19:37,773 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(751): hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 is not closed yet, will try archiving it next time 2024-12-13T17:19:37,773 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-13T17:19:37,773 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-13T17:19:37,773 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 2024-12-13T17:19:37,774 WARN [IPC Server handler 1 on default port 34581 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 has not been closed. Lease recovery is in progress. RecoveryId = 1048 for block blk_1073741830_1016 2024-12-13T17:19:37,774 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 after 1ms 2024-12-13T17:19:37,963 WARN [Thread-773 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-13T17:19:37,967 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdface41e7ac07e06 with lease ID 0xe8161f9f6bfb527b: from storage DS-ebf2f61b-23ad-4d5d-98d1-2d343a7ed7eb node DatanodeRegistration(127.0.0.1:39883, datanodeUuid=8217e5cd-a926-4627-9cc3-4b6ed64b2cf6, infoPort=45089, infoSecurePort=0, ipcPort=39875, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678), blocks: 8, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-13T17:19:37,967 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdface41e7ac07e06 with lease ID 0xe8161f9f6bfb527b: from storage DS-7f4aae4e-4c5a-4ab9-b1f0-bd0b5c5ad36c node DatanodeRegistration(127.0.0.1:39883, datanodeUuid=8217e5cd-a926-4627-9cc3-4b6ed64b2cf6, infoPort=45089, infoSecurePort=0, ipcPort=39875, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-13T17:19:40,233 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@106230f7[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43667, datanodeUuid=61dfa4d7-b9d9-4b14-9600-d429686ff6c1, infoPort=46853, infoSecurePort=0, ipcPort=39257, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678):Failed to transfer BP-2145113159-172.17.0.2-1734110345678:blk_1073741844_1027 to 127.0.0.1:43595 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:40,234 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@675d6cfd[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43667, datanodeUuid=61dfa4d7-b9d9-4b14-9600-d429686ff6c1, infoPort=46853, infoSecurePort=0, ipcPort=39257, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678):Failed to transfer BP-2145113159-172.17.0.2-1734110345678:blk_1073741854_1037 to 127.0.0.1:43595 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:41,231 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@675d6cfd[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43667, datanodeUuid=61dfa4d7-b9d9-4b14-9600-d429686ff6c1, infoPort=46853, infoSecurePort=0, ipcPort=39257, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678):Failed to transfer BP-2145113159-172.17.0.2-1734110345678:blk_1073741849_1032 to 127.0.0.1:37023 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:41,776 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 after 4003ms 2024-12-13T17:19:42,804 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-13T17:19:42,805 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41656, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-13T17:19:42,967 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@14552070[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39883, datanodeUuid=8217e5cd-a926-4627-9cc3-4b6ed64b2cf6, infoPort=45089, infoSecurePort=0, ipcPort=39875, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678):Failed to transfer BP-2145113159-172.17.0.2-1734110345678:blk_1073741837_1013 to 127.0.0.1:43595 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:42,967 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1a92cc8a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39883, datanodeUuid=8217e5cd-a926-4627-9cc3-4b6ed64b2cf6, infoPort=45089, infoSecurePort=0, ipcPort=39875, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678):Failed to transfer BP-2145113159-172.17.0.2-1734110345678:blk_1073741835_1011 to 127.0.0.1:37023 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:43,153 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-13T17:19:43,156 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41668, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-13T17:19:43,966 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@14552070[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39883, datanodeUuid=8217e5cd-a926-4627-9cc3-4b6ed64b2cf6, infoPort=45089, infoSecurePort=0, ipcPort=39875, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678):Failed to transfer BP-2145113159-172.17.0.2-1734110345678:blk_1073741829_1005 to 127.0.0.1:43595 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T17:19:43,966 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1a92cc8a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39883, datanodeUuid=8217e5cd-a926-4627-9cc3-4b6ed64b2cf6, infoPort=45089, infoSecurePort=0, ipcPort=39875, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678):Failed to transfer BP-2145113159-172.17.0.2-1734110345678:blk_1073741831_1007 to 127.0.0.1:37023 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:45,970 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1a92cc8a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39883, datanodeUuid=8217e5cd-a926-4627-9cc3-4b6ed64b2cf6, infoPort=45089, infoSecurePort=0, ipcPort=39875, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678):Failed to transfer BP-2145113159-172.17.0.2-1734110345678:blk_1073741832_1008 to 127.0.0.1:37023 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:45,970 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@14552070[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39883, datanodeUuid=8217e5cd-a926-4627-9cc3-4b6ed64b2cf6, infoPort=45089, infoSecurePort=0, ipcPort=39875, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678):Failed to transfer BP-2145113159-172.17.0.2-1734110345678:blk_1073741828_1004 to 127.0.0.1:43595 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:46,970 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@14552070[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39883, datanodeUuid=8217e5cd-a926-4627-9cc3-4b6ed64b2cf6, infoPort=45089, infoSecurePort=0, ipcPort=39875, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678):Failed to transfer BP-2145113159-172.17.0.2-1734110345678:blk_1073741826_1002 to 127.0.0.1:37023 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:48,968 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@14552070[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39883, datanodeUuid=8217e5cd-a926-4627-9cc3-4b6ed64b2cf6, infoPort=45089, infoSecurePort=0, ipcPort=39875, storageInfo=lv=-57;cid=testClusterID;nsid=224569308;c=1734110345678):Failed to transfer BP-2145113159-172.17.0.2-1734110345678:blk_1073741827_1003 to 127.0.0.1:43595 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:48,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43667 is added to blk_1073741825_1001 (size=7) 2024-12-13T17:19:49,444 INFO [master/bcc0a2319104:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 
2024-12-13T17:19:49,444 INFO [master/bcc0a2319104:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-13T17:19:50,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43667 is added to blk_1073741836_1012 (size=42) 2024-12-13T17:19:50,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43667 is added to blk_1073741838_1014 (size=76) 2024-12-13T17:19:54,596 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 4ae54d53dadb6f7d5f47a16f2daa01e3, had cached 0 bytes from a total of 10347 2024-12-13T17:19:56,501 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C43669%2C1734110347446.1734110396501 2024-12-13T17:19:56,506 WARN [Thread-805 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1049 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37023 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-13T17:19:56,506 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1216703933_22 at /127.0.0.1:37762 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741865_1049] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data6]'}, localName='127.0.0.1:43667', datanodeUuid='61dfa4d7-b9d9-4b14-9600-d429686ff6c1', xmitsInProgress=0}:Exception transferring block BP-2145113159-172.17.0.2-1734110345678:blk_1073741865_1049 to mirror 127.0.0.1:37023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:56,507 WARN [Thread-805 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2145113159-172.17.0.2-1734110345678:blk_1073741865_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43667,DS-dabb14ee-4400-4e52-95df-8dd261c84837,DISK], DatanodeInfoWithStorage[127.0.0.1:37023,DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37023,DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f,DISK]) is bad. 2024-12-13T17:19:56,507 WARN [Thread-805 {}] hdfs.DataStreamer(1850): Abandoning BP-2145113159-172.17.0.2-1734110345678:blk_1073741865_1049 2024-12-13T17:19:56,507 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1216703933_22 at /127.0.0.1:37762 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741865_1049] {}] datanode.BlockReceiver(316): Block 1073741865 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-13T17:19:56,507 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1216703933_22 at /127.0.0.1:37762 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741865_1049] {}] datanode.DataXceiver(331): 127.0.0.1:43667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37762 dst: /127.0.0.1:43667 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-13T17:19:56,507 WARN [Thread-805 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37023,DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f,DISK] 2024-12-13T17:19:56,515 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110377019 with entries=2, filesize=1.57 KB; new WAL /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110396501 2024-12-13T17:19:56,516 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46853:46853),(127.0.0.1/127.0.0.1:45089:45089)] 2024-12-13T17:19:56,516 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 is not closed yet, will try archiving it next time 2024-12-13T17:19:56,516 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110377019 is not closed yet, will try archiving it next time 2024-12-13T17:19:56,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43667 is added to blk_1073741859_1042 (size=1618) 2024-12-13T17:19:56,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43669 {}] regionserver.HRegion(8581): Flush requested on 4ae54d53dadb6f7d5f47a16f2daa01e3 2024-12-13T17:19:56,519 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 4ae54d53dadb6f7d5f47a16f2daa01e3 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-12-13T17:19:56,520 INFO [sync.3 {}] wal.FSHLog(777): LowReplication-Roller was enabled. 2024-12-13T17:19:56,525 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ae54d53dadb6f7d5f47a16f2daa01e3/.tmp/info/f699f83b89b449bd82878a2e378cf9da is 1080, key is row0007/info:/1734110376802/Put/seqid=0 2024-12-13T17:19:56,526 WARN [Thread-812 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-13T17:19:56,527 WARN [Thread-812 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2145113159-172.17.0.2-1734110345678:blk_1073741867_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37023,DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f,DISK], DatanodeInfoWithStorage[127.0.0.1:39883,DS-ebf2f61b-23ad-4d5d-98d1-2d343a7ed7eb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37023,DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f,DISK]) is bad. 2024-12-13T17:19:56,527 WARN [Thread-812 {}] hdfs.DataStreamer(1850): Abandoning BP-2145113159-172.17.0.2-1734110345678:blk_1073741867_1051 2024-12-13T17:19:56,527 WARN [Thread-812 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37023,DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f,DISK] 2024-12-13T17:19:56,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43667 is added to blk_1073741868_1052 (size=13583) 2024-12-13T17:19:56,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39883 is added to blk_1073741868_1052 (size=13583) 2024-12-13T17:19:56,534 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=25 (bloomFilter=true), to=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ae54d53dadb6f7d5f47a16f2daa01e3/.tmp/info/f699f83b89b449bd82878a2e378cf9da 2024-12-13T17:19:56,536 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-13T17:19:56,536 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-13T17:19:56,536 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6ec1b9dd to 127.0.0.1:64400 2024-12-13T17:19:56,536 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T17:19:56,536 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-13T17:19:56,536 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1563090508, stopped=false 2024-12-13T17:19:56,537 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=bcc0a2319104,41247,1734110347301 2024-12-13T17:19:56,545 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ae54d53dadb6f7d5f47a16f2daa01e3/.tmp/info/f699f83b89b449bd82878a2e378cf9da as hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ae54d53dadb6f7d5f47a16f2daa01e3/info/f699f83b89b449bd82878a2e378cf9da 2024-12-13T17:19:56,550 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ae54d53dadb6f7d5f47a16f2daa01e3/info/f699f83b89b449bd82878a2e378cf9da, entries=8, sequenceid=25, filesize=13.3 K 2024-12-13T17:19:56,551 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~10.50 KB/10757, heapSize ~11.48 KB/11760, currentSize=9.46 KB/9684 for 4ae54d53dadb6f7d5f47a16f2daa01e3 in 32ms, sequenceid=25, compaction requested=false 2024-12-13T17:19:56,551 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 4ae54d53dadb6f7d5f47a16f2daa01e3:
2024-12-13T17:19:56,551 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=23.4 K, sizeToCheck=16.0 K 2024-12-13T17:19:56,551 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-13T17:19:56,552 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4ae54d53dadb6f7d5f47a16f2daa01e3/info/f699f83b89b449bd82878a2e378cf9da because midkey is the same as first or last row 2024-12-13T17:19:56,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x10020671c920001, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-13T17:19:56,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-13T17:19:56,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42631-0x10020671c920003, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-13T17:19:56,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x10020671c920001, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:19:56,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:19:56,594 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-13T17:19:56,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42631-0x10020671c920003, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:19:56,595 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T17:19:56,595 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'bcc0a2319104,43669,1734110347446' ***** 2024-12-13T17:19:56,595 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-13T17:19:56,595 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'bcc0a2319104,42631,1734110349102' ***** 2024-12-13T17:19:56,595 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-13T17:19:56,596 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T17:19:56,596 INFO [RS:1;bcc0a2319104:42631 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-13T17:19:56,596 INFO [RS:0;bcc0a2319104:43669 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-13T17:19:56,596 INFO [RS:1;bcc0a2319104:42631 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-13T17:19:56,596 INFO [RS:0;bcc0a2319104:43669 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-13T17:19:56,596 INFO [RS:1;bcc0a2319104:42631 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-13T17:19:56,596 INFO [RS:0;bcc0a2319104:43669 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-13T17:19:56,596 INFO [RS:1;bcc0a2319104:42631 {}] regionserver.HRegionServer(1224): stopping server bcc0a2319104,42631,1734110349102 2024-12-13T17:19:56,596 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42631-0x10020671c920003, quorum=127.0.0.1:64400, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T17:19:56,596 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-13T17:19:56,596 INFO [RS:0;bcc0a2319104:43669 {}] regionserver.HRegionServer(3579): Received CLOSE for ec823748c41d7535bc958c58981d1660 2024-12-13T17:19:56,596 DEBUG [RS:1;bcc0a2319104:42631 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T17:19:56,596 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-13T17:19:56,597 INFO [RS:1;bcc0a2319104:42631 {}] regionserver.HRegionServer(1250): stopping server bcc0a2319104,42631,1734110349102; all regions closed. 2024-12-13T17:19:56,597 INFO [RS:0;bcc0a2319104:43669 {}] regionserver.HRegionServer(3579): Received CLOSE for 4ae54d53dadb6f7d5f47a16f2daa01e3 2024-12-13T17:19:56,597 INFO [RS:0;bcc0a2319104:43669 {}] regionserver.HRegionServer(1224): stopping server bcc0a2319104,43669,1734110347446 2024-12-13T17:19:56,598 DEBUG [RS:0;bcc0a2319104:43669 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T17:19:56,598 INFO [RS:0;bcc0a2319104:43669 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-13T17:19:56,598 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,42631,1734110349102 2024-12-13T17:19:56,598 INFO [RS:0;bcc0a2319104:43669 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-13T17:19:56,598 INFO [RS:0;bcc0a2319104:43669 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-13T17:19:56,598 INFO [RS:0;bcc0a2319104:43669 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-13T17:19:56,598 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing ec823748c41d7535bc958c58981d1660, disabling compactions & flushes 2024-12-13T17:19:56,598 WARN [WAL-Shutdown-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-13T17:19:56,599 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43669-0x10020671c920001, quorum=127.0.0.1:64400, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T17:19:56,599 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734110348537.ec823748c41d7535bc958c58981d1660. 2024-12-13T17:19:56,599 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734110348537.ec823748c41d7535bc958c58981d1660. 2024-12-13T17:19:56,599 ERROR [RS:1;bcc0a2319104:42631 {}] regionserver.HRegionServer(1664): Shutdown / close of WAL failed: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK]] are bad. Aborting... 2024-12-13T17:19:56,599 INFO [RS:0;bcc0a2319104:43669 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close 2024-12-13T17:19:56,599 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734110348537.ec823748c41d7535bc958c58981d1660. after waiting 0 ms 2024-12-13T17:19:56,599 DEBUG [RS:0;bcc0a2319104:43669 {}] regionserver.HRegionServer(1603): Online Regions={ec823748c41d7535bc958c58981d1660=hbase:namespace,,1734110348537.ec823748c41d7535bc958c58981d1660., 4ae54d53dadb6f7d5f47a16f2daa01e3=TestLogRolling-testLogRollOnDatanodeDeath,,1734110349217.4ae54d53dadb6f7d5f47a16f2daa01e3., 1588230740=hbase:meta,,1.1588230740} 2024-12-13T17:19:56,599 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734110348537.ec823748c41d7535bc958c58981d1660. 2024-12-13T17:19:56,599 DEBUG [RS:1;bcc0a2319104:42631 {}] regionserver.HRegionServer(1665): Shutdown / close exception details: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
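
The "Error Recovery ... datanode 0(...) is bad" / "Abandoning blk_..." / "Excluding datanode ..." sequence above is the HDFS client rebuilding its write pipeline after losing a datanode, and the "All datanodes ... are bad. Aborting..." traces are what happens when no healthy replica remains. How aggressively the client replaces a failed node, rather than carrying on with the survivors, is governed by the standard replace-datanode-on-failure client settings. A minimal sketch of tuning them, assuming only hadoop-hdfs-client on the classpath; the NameNode URI and path are illustrative, not taken from this run:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class PipelineRecoverySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Replace failed datanodes in the pipeline, but fall back to the
        // surviving replicas when no replacement can be found (small clusters).
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);

        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
             FSDataOutputStream out = fs.create(new Path("/tmp/pipeline-demo"))) {
          out.writeBytes("written through a recoverable pipeline\n");
          out.hflush(); // forces pipeline setup, and recovery if a node is bad
        }
      }
    }
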
2024-12-13T17:19:56,599 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-13T17:19:56,599 DEBUG [RS:1;bcc0a2319104:42631 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T17:19:56,599 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing ec823748c41d7535bc958c58981d1660 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-13T17:19:56,599 DEBUG [RS:0;bcc0a2319104:43669 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 4ae54d53dadb6f7d5f47a16f2daa01e3, ec823748c41d7535bc958c58981d1660 2024-12-13T17:19:56,599 INFO [RS:1;bcc0a2319104:42631 {}] regionserver.LeaseManager(133): Closed leases 2024-12-13T17:19:56,599 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-13T17:19:56,599 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-13T17:19:56,600 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-13T17:19:56,600 INFO [RS:1;bcc0a2319104:42631 {}] hbase.ChoreService(370): Chore service for: regionserver/bcc0a2319104:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-13T17:19:56,600 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-13T17:19:56,600 INFO [RS:1;bcc0a2319104:42631 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-13T17:19:56,600 INFO [RS:1;bcc0a2319104:42631 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-13T17:19:56,600 INFO [RS:1;bcc0a2319104:42631 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-13T17:19:56,600 INFO [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-13T17:19:56,600 INFO [RS:1;bcc0a2319104:42631 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:42631 2024-12-13T17:19:56,600 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.87 KB heapSize=5.40 KB 2024-12-13T17:19:56,601 WARN [RS_OPEN_META-regionserver/bcc0a2319104:0-0.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=15, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-13T17:19:56,601 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog bcc0a2319104%2C43669%2C1734110347446.meta:.meta(num 1734110348445) roll requested 2024-12-13T17:19:56,601 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-13T17:19:56,601 INFO [regionserver/bcc0a2319104:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C43669%2C1734110347446.meta.1734110396601.meta 2024-12-13T17:19:56,601 ERROR [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2808): ***** ABORTING region server bcc0a2319104,43669,1734110347446: Unrecoverable exception while closing hbase:meta,,1.1588230740 ***** org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-13T17:19:56,602 ERROR [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2815): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint] 2024-12-13T17:19:56,604 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for java.lang:type=Memory 2024-12-13T17:19:56,605 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC 2024-12-13T17:19:56,605 WARN [Thread-820 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1053 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37023 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-13T17:19:56,605 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication 2024-12-13T17:19:56,605 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:37796 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741869_1053] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data6]'}, localName='127.0.0.1:43667', datanodeUuid='61dfa4d7-b9d9-4b14-9600-d429686ff6c1', xmitsInProgress=0}:Exception transferring block BP-2145113159-172.17.0.2-1734110345678:blk_1073741869_1053 to mirror 127.0.0.1:37023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:56,605 WARN [Thread-820 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2145113159-172.17.0.2-1734110345678:blk_1073741869_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43667,DS-dabb14ee-4400-4e52-95df-8dd261c84837,DISK], DatanodeInfoWithStorage[127.0.0.1:37023,DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37023,DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f,DISK]) is bad. 2024-12-13T17:19:56,605 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server 2024-12-13T17:19:56,605 WARN [Thread-820 {}] hdfs.DataStreamer(1850): Abandoning BP-2145113159-172.17.0.2-1734110345678:blk_1073741869_1053 2024-12-13T17:19:56,605 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:37796 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741869_1053] {}] datanode.BlockReceiver(316): Block 1073741869 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
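
The repeated java.net.ConnectException to 127.0.0.1:37023 above is the expected shape of this test: judging by its name, TestLogRolling's testLogRollOnDatanodeDeath kills a datanode while WALs are open, so surviving datanodes can no longer mirror blocks to it and writers must recover. A sketch of that setup against a MiniDFSCluster, hedged: this is the general Hadoop test-harness pattern, not the actual test body:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class DatanodeDeathSketch {
      public static void main(String[] args) throws Exception {
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration())
            .numDataNodes(3) // enough nodes for a replicated WAL pipeline
            .build();
        try {
          cluster.waitActive();
          // ... open a file whose pipeline includes datanode 0 ...
          cluster.stopDataNode(0); // writers now see "datanode ... is bad" and
                                   // mirrors to it fail with Connection refused
          // ... a WAL writer is then expected to roll onto the surviving nodes ...
        } finally {
          cluster.shutdown();
        }
      }
    }
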
2024-12-13T17:19:56,605 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2819): Dump of metrics as JSON on abort: { "beans": [ { "name": "java.lang:type=Memory", "modelerType": "sun.management.MemoryImpl", "Verbose": false, "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1048576000, "init": 1048576000, "max": 2306867200, "used": 280094208 }, "NonHeapMemoryUsage": { "committed": 161284096, "init": 7667712, "max": -1, "used": 159500656 }, "ObjectName": "java.lang:type=Memory" } ], "beans": [], "beans": [], "beans": [] } 2024-12-13T17:19:56,606 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:37796 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741869_1053] {}] datanode.DataXceiver(331): 127.0.0.1:43667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37796 dst: /127.0.0.1:43667 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:56,606 WARN [Thread-820 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37023,DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f,DISK] 2024-12-13T17:19:56,608 WARN [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41247 {}] master.MasterRpcServices(626): bcc0a2319104,43669,1734110347446 reported a fatal error: ***** ABORTING region server bcc0a2319104,43669,1734110347446: Unrecoverable exception while closing hbase:meta,,1.1588230740 ***** Cause: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) at java.base/java.lang.Thread.run(Thread.java:840) Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) 2024-12-13T17:19:56,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42631-0x10020671c920003, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/bcc0a2319104,42631,1734110349102 2024-12-13T17:19:56,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-13T17:19:56,611 WARN [regionserver/bcc0a2319104:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WAL: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL 2024-12-13T17:19:56,611 INFO [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta with entries=11, filesize=3.63 KB; new WAL /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110396601.meta 2024-12-13T17:19:56,614 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45089:45089),(127.0.0.1/127.0.0.1:46853:46853)] 2024-12-13T17:19:56,614 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta is not closed yet, will try archiving it next time 2024-12-13T17:19:56,614 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-13T17:19:56,614 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43399,DS-78bc1bfb-b468-4433-986c-32784cc53682,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-13T17:19:56,614 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta 2024-12-13T17:19:56,615 WARN [IPC Server handler 1 on default port 34581 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta has not been closed. Lease recovery is in progress. RecoveryId = 1055 for block blk_1073741834_1018 2024-12-13T17:19:56,615 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta after 1ms 2024-12-13T17:19:56,617 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/data/hbase/namespace/ec823748c41d7535bc958c58981d1660/.tmp/info/0b22fb40751747188f65047fb3cd1a69 is 45, key is default/info:d/1734110348965/Put/seqid=0 2024-12-13T17:19:56,619 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [bcc0a2319104,42631,1734110349102] 2024-12-13T17:19:56,619 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing bcc0a2319104,42631,1734110349102; numProcessing=1 2024-12-13T17:19:56,619 WARN [Thread-821 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1056 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37023 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
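
The "Recover lease on dfs file ... Failed to recover lease, attempt=0 ... after 1ms" lines above show RecoverLeaseFSUtils asking the NameNode to force-close the dead writer's WAL file so it can be replayed and archived; the attempt=1 entry further below lands about four seconds later, matching its retry backoff. The underlying primitive is DistributedFileSystem.recoverLease; a minimal polling sketch (the intervals are illustrative, not HBase's exact schedule):

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoverySketch {
      /** Polls recoverLease until the NameNode reports the file closed. */
      static boolean recover(DistributedFileSystem dfs, Path wal)
          throws IOException, InterruptedException {
        for (int attempt = 0; attempt < 10; attempt++) {
          if (dfs.recoverLease(wal)) { // true once recovery has completed
            return true;
          }
          Thread.sleep(attempt == 0 ? 1000L : 4000L); // quick first retry, then back off
        }
        return false;
      }
    }
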
2024-12-13T17:19:56,619 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:37818 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741871_1056] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data6]'}, localName='127.0.0.1:43667', datanodeUuid='61dfa4d7-b9d9-4b14-9600-d429686ff6c1', xmitsInProgress=0}:Exception transferring block BP-2145113159-172.17.0.2-1734110345678:blk_1073741871_1056 to mirror 127.0.0.1:37023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:56,619 WARN [Thread-821 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2145113159-172.17.0.2-1734110345678:blk_1073741871_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43667,DS-dabb14ee-4400-4e52-95df-8dd261c84837,DISK], DatanodeInfoWithStorage[127.0.0.1:37023,DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37023,DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f,DISK]) is bad. 2024-12-13T17:19:56,619 WARN [Thread-821 {}] hdfs.DataStreamer(1850): Abandoning BP-2145113159-172.17.0.2-1734110345678:blk_1073741871_1056 2024-12-13T17:19:56,619 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:37818 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741871_1056] {}] datanode.BlockReceiver(316): Block 1073741871 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-13T17:19:56,619 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1302011151_22 at /127.0.0.1:37818 [Receiving block BP-2145113159-172.17.0.2-1734110345678:blk_1073741871_1056] {}] datanode.DataXceiver(331): 127.0.0.1:43667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37818 dst: /127.0.0.1:43667 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:19:56,620 WARN [Thread-821 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37023,DS-ffacaca1-e1d7-4c23-951f-eaa7ac41886f,DISK] 2024-12-13T17:19:56,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39883 is added to blk_1073741872_1057 (size=5037) 2024-12-13T17:19:56,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43667 is added to blk_1073741872_1057 (size=5037) 2024-12-13T17:19:56,625 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/data/hbase/namespace/ec823748c41d7535bc958c58981d1660/.tmp/info/0b22fb40751747188f65047fb3cd1a69 2024-12-13T17:19:56,627 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/bcc0a2319104,42631,1734110349102 already deleted, retry=false 2024-12-13T17:19:56,627 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; bcc0a2319104,42631,1734110349102 expired; onlineServers=1 2024-12-13T17:19:56,631 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/data/hbase/namespace/ec823748c41d7535bc958c58981d1660/.tmp/info/0b22fb40751747188f65047fb3cd1a69 as hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/data/hbase/namespace/ec823748c41d7535bc958c58981d1660/info/0b22fb40751747188f65047fb3cd1a69 2024-12-13T17:19:56,637 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/data/hbase/namespace/ec823748c41d7535bc958c58981d1660/info/0b22fb40751747188f65047fb3cd1a69, entries=2, sequenceid=6, filesize=4.9 K 2024-12-13T17:19:56,638 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for ec823748c41d7535bc958c58981d1660 in 39ms, sequenceid=6, compaction requested=false 2024-12-13T17:19:56,643 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/data/hbase/namespace/ec823748c41d7535bc958c58981d1660/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T17:19:56,643 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed 
hbase:namespace,,1734110348537.ec823748c41d7535bc958c58981d1660. 2024-12-13T17:19:56,643 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for ec823748c41d7535bc958c58981d1660: 2024-12-13T17:19:56,643 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1734110348537.ec823748c41d7535bc958c58981d1660. 2024-12-13T17:19:56,644 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 4ae54d53dadb6f7d5f47a16f2daa01e3, disabling compactions & flushes 2024-12-13T17:19:56,644 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1734110349217.4ae54d53dadb6f7d5f47a16f2daa01e3. 2024-12-13T17:19:56,644 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1734110349217.4ae54d53dadb6f7d5f47a16f2daa01e3. 2024-12-13T17:19:56,644 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1734110349217.4ae54d53dadb6f7d5f47a16f2daa01e3. after waiting 0 ms 2024-12-13T17:19:56,644 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1734110349217.4ae54d53dadb6f7d5f47a16f2daa01e3. 2024-12-13T17:19:56,644 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 4ae54d53dadb6f7d5f47a16f2daa01e3: 2024-12-13T17:19:56,644 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2803): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing TestLogRolling-testLogRollOnDatanodeDeath,,1734110349217.4ae54d53dadb6f7d5f47a16f2daa01e3. 2024-12-13T17:19:56,719 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42631-0x10020671c920003, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-13T17:19:56,719 INFO [RS:1;bcc0a2319104:42631 {}] regionserver.HRegionServer(1307): Exiting; stopping=bcc0a2319104,42631,1734110349102; zookeeper connection closed. 
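
The "Committing ....tmp/info/<file> as .../info/<file>" and "Added ..." pairs above are the two-step flush commit: the memstore snapshot is written to a temporary file first and only then renamed into the store directory, so scanners never observe a half-written HFile. The idiom, sketched with plain FileSystem calls (real HBase wraps the rename in extra validation):

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class FlushCommitSketch {
      /** Write-then-rename keeps partially written flush output invisible to readers. */
      static Path commit(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
        Path dst = new Path(storeDir, tmpFile.getName());
        if (!fs.rename(tmpFile, dst)) { // rename is atomic in the HDFS namespace
          throw new IOException("Failed to commit " + tmpFile + " as " + dst);
        }
        return dst;
      }
    }
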
2024-12-13T17:19:56,719 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42631-0x10020671c920003, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-13T17:19:56,720 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3521485d {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3521485d 2024-12-13T17:19:56,800 INFO [RS:0;bcc0a2319104:43669 {}] regionserver.HRegionServer(3579): Received CLOSE for 4ae54d53dadb6f7d5f47a16f2daa01e3 2024-12-13T17:19:56,800 INFO [RS:0;bcc0a2319104:43669 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-13T17:19:56,800 DEBUG [RS:0;bcc0a2319104:43669 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 4ae54d53dadb6f7d5f47a16f2daa01e3 2024-12-13T17:19:56,800 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 4ae54d53dadb6f7d5f47a16f2daa01e3, disabling compactions & flushes 2024-12-13T17:19:56,800 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-13T17:19:56,801 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1734110349217.4ae54d53dadb6f7d5f47a16f2daa01e3. 2024-12-13T17:19:56,801 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-13T17:19:56,801 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1734110349217.4ae54d53dadb6f7d5f47a16f2daa01e3. 2024-12-13T17:19:56,801 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-13T17:19:56,801 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1734110349217.4ae54d53dadb6f7d5f47a16f2daa01e3. after waiting 0 ms 2024-12-13T17:19:56,801 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-13T17:19:56,801 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1734110349217.4ae54d53dadb6f7d5f47a16f2daa01e3. 
2024-12-13T17:19:56,801 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-13T17:19:56,801 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 4ae54d53dadb6f7d5f47a16f2daa01e3: 2024-12-13T17:19:56,801 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-13T17:19:56,802 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2803): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing TestLogRolling-testLogRollOnDatanodeDeath,,1734110349217.4ae54d53dadb6f7d5f47a16f2daa01e3. 2024-12-13T17:19:56,802 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2803): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing hbase:meta,,1.1588230740 2024-12-13T17:19:56,920 DEBUG [Close-WAL-Writer-2 {}] wal.AbstractFSWAL(751): hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 is not closed yet, will try archiving it next time 2024-12-13T17:19:56,921 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110372746 to hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/oldWALs/bcc0a2319104%2C43669%2C1734110347446.1734110372746 2024-12-13T17:19:56,925 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110376795 to hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/oldWALs/bcc0a2319104%2C43669%2C1734110347446.1734110376795 2024-12-13T17:19:56,928 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110377019 to hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/oldWALs/bcc0a2319104%2C43669%2C1734110347446.1734110377019 2024-12-13T17:19:56,939 INFO [regionserver/bcc0a2319104:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-13T17:19:56,940 INFO [regionserver/bcc0a2319104:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-13T17:19:57,001 INFO [RS:0;bcc0a2319104:43669 {}] regionserver.HRegionServer(1624): We were exiting though online regions are not empty, because some regions failed closing 2024-12-13T17:19:57,001 INFO [RS:0;bcc0a2319104:43669 {}] regionserver.HRegionServer(1250): stopping server bcc0a2319104,43669,1734110347446; all regions closed. 
2024-12-13T17:19:57,002 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446 2024-12-13T17:19:57,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43667 is added to blk_1073741870_1054 (size=93) 2024-12-13T17:19:57,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39883 is added to blk_1073741870_1054 (size=93) 2024-12-13T17:19:57,184 INFO [regionserver/bcc0a2319104:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-13T17:19:57,961 INFO [regionserver/bcc0a2319104:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-13T17:19:58,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39883 is added to blk_1073741859_1042 (size=1618) 2024-12-13T17:20:00,617 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta after 4003ms 2024-12-13T17:20:01,645 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:20:01,660 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:20:01,662 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:20:01,663 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:20:01,669 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:20:01,669 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:20:02,009 ERROR [WAL-Shutdown-0 {}] wal.FSHLog(508): We have waited 5 seconds but the close of writer(s) doesn't complete. Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-13T17:20:02,011 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446 2024-12-13T17:20:02,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39883 is added to blk_1073741866_1050 (size=13280) 2024-12-13T17:20:02,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43667 is added to blk_1073741866_1050 (size=13280) 2024-12-13T17:20:02,173 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-13T17:20:02,178 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:20:02,193 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:20:02,194 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:20:02,194 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:20:02,197 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:20:02,197 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:20:04,019 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-13T17:20:07,017 ERROR [WAL-Shutdown-0 {}] wal.FSHLog(508): We have waited 5 seconds but the close of writer(s) doesn't complete. Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-13T17:20:07,018 DEBUG [RS:0;bcc0a2319104:43669 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T17:20:07,018 INFO [RS:0;bcc0a2319104:43669 {}] regionserver.LeaseManager(133): Closed leases 2024-12-13T17:20:07,018 INFO [RS:0;bcc0a2319104:43669 {}] hbase.ChoreService(370): Chore service for: regionserver/bcc0a2319104:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-13T17:20:07,019 INFO [RS:0;bcc0a2319104:43669 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:43669 2024-12-13T17:20:07,019 INFO [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
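
The two "We have waited 5 seconds but the close of writer(s) doesn't complete" errors above are expected in this scenario: the writer cannot close because its entire pipeline is gone. The message itself names the relevant knob; when a slow, rather than dead, filesystem is the cause, the wait can simply be raised. A sketch, assuming hbase-common on the classpath (30 is an arbitrary example value):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalShutdownWaitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Give WAL writers longer to close before shutdown gives up on them.
        conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30);
      }
    }
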
2024-12-13T17:20:07,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x10020671c920001, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/bcc0a2319104,43669,1734110347446 2024-12-13T17:20:07,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-13T17:20:07,068 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [bcc0a2319104,43669,1734110347446] 2024-12-13T17:20:07,069 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing bcc0a2319104,43669,1734110347446; numProcessing=2 2024-12-13T17:20:07,077 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/bcc0a2319104,43669,1734110347446 already deleted, retry=false 2024-12-13T17:20:07,077 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; bcc0a2319104,43669,1734110347446 expired; onlineServers=0 2024-12-13T17:20:07,077 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'bcc0a2319104,41247,1734110347301' ***** 2024-12-13T17:20:07,077 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-13T17:20:07,077 DEBUG [M:0;bcc0a2319104:41247 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73b5f0d1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bcc0a2319104/172.17.0.2:0 2024-12-13T17:20:07,077 INFO [M:0;bcc0a2319104:41247 {}] regionserver.HRegionServer(1224): stopping server bcc0a2319104,41247,1734110347301 2024-12-13T17:20:07,077 INFO [M:0;bcc0a2319104:41247 {}] regionserver.HRegionServer(1250): stopping server bcc0a2319104,41247,1734110347301; all regions closed. 2024-12-13T17:20:07,077 DEBUG [M:0;bcc0a2319104:41247 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T17:20:07,077 DEBUG [M:0;bcc0a2319104:41247 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-13T17:20:07,078 DEBUG [M:0;bcc0a2319104:41247 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-13T17:20:07,078 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
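
The "RegionServer ephemeral node deleted, processing expiration" flow above works because each region server registers itself under /hbase/rs as a ZooKeeper ephemeral znode: when the server's session closes or expires, ZooKeeper deletes the node and notifies watchers such as the master's RegionServerTracker. A bare-ZooKeeper sketch of the pattern (connect string and znode path are illustrative):

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs.Ids;
    import org.apache.zookeeper.ZooKeeper;

    public class EphemeralLivenessSketch {
      public static void main(String[] args) throws Exception {
        // The watcher receives events such as NodeDeleted for watched paths.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000,
            event -> System.out.println("event: " + event.getType() + " " + event.getPath()));
        // A server advertises liveness; the node is removed automatically
        // when the creating session closes or expires.
        zk.create("/rs-demo", new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        zk.close(); // other sessions watching /rs-demo now see NodeDeleted
      }
    }
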
2024-12-13T17:20:07,078 DEBUG [master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.small.0-1734110347750 {}] cleaner.HFileCleaner(306): Exit Thread[master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.small.0-1734110347750,5,FailOnTimeoutGroup] 2024-12-13T17:20:07,078 DEBUG [master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.large.0-1734110347750 {}] cleaner.HFileCleaner(306): Exit Thread[master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.large.0-1734110347750,5,FailOnTimeoutGroup] 2024-12-13T17:20:07,078 INFO [M:0;bcc0a2319104:41247 {}] hbase.ChoreService(370): Chore service for: master/bcc0a2319104:0 had [] on shutdown 2024-12-13T17:20:07,078 DEBUG [M:0;bcc0a2319104:41247 {}] master.HMaster(1733): Stopping service threads 2024-12-13T17:20:07,078 INFO [M:0;bcc0a2319104:41247 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-13T17:20:07,078 INFO [M:0;bcc0a2319104:41247 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-13T17:20:07,079 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-13T17:20:07,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-13T17:20:07,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:20:07,085 DEBUG [M:0;bcc0a2319104:41247 {}] zookeeper.ZKUtil(347): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-13T17:20:07,086 WARN [M:0;bcc0a2319104:41247 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-13T17:20:07,086 INFO [M:0;bcc0a2319104:41247 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-13T17:20:07,086 INFO [M:0;bcc0a2319104:41247 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-13T17:20:07,086 DEBUG [M:0;bcc0a2319104:41247 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-13T17:20:07,086 INFO [M:0;bcc0a2319104:41247 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-13T17:20:07,086 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-13T17:20:07,086 DEBUG [M:0;bcc0a2319104:41247 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-13T17:20:07,086 DEBUG [M:0;bcc0a2319104:41247 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-13T17:20:07,086 DEBUG [M:0;bcc0a2319104:41247 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-13T17:20:07,086 INFO [M:0;bcc0a2319104:41247 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=40.08 KB heapSize=49.29 KB 2024-12-13T17:20:07,088 WARN [sync.3 {}] wal.FSHLog(750): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43667,DS-dabb14ee-4400-4e52-95df-8dd261c84837,DISK]] 2024-12-13T17:20:07,088 WARN [sync.3 {}] wal.FSHLog(721): Requesting log roll because of low replication, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43667,DS-dabb14ee-4400-4e52-95df-8dd261c84837,DISK]] 2024-12-13T17:20:07,088 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(197): WAL FSHLog bcc0a2319104%2C41247%2C1734110347301:(num 1734110377750) roll requested 2024-12-13T17:20:07,088 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C41247%2C1734110347301.1734110407088 2024-12-13T17:20:07,099 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110377750 with entries=1, filesize=349 B; new WAL /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110407088 2024-12-13T17:20:07,099 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46853:46853),(127.0.0.1/127.0.0.1:45089:45089)] 2024-12-13T17:20:07,099 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(751): hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 is not closed yet, will try archiving it next time 2024-12-13T17:20:07,099 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(751): hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110377750 is not closed yet, will try archiving it next time 2024-12-13T17:20:07,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43667 is added to blk_1073741864_1047 (size=357) 2024-12-13T17:20:07,107 DEBUG [M:0;bcc0a2319104:41247 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5c805b6e59c34f7a9cd49bb781ce1e9e is 82, key is hbase:meta,,1/info:regioninfo/1734110348473/Put/seqid=0 2024-12-13T17:20:07,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43667 is added to blk_1073741874_1059 (size=5672) 2024-12-13T17:20:07,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39883 is added to blk_1073741874_1059 (size=5672) 2024-12-13T17:20:07,112 INFO [M:0;bcc0a2319104:41247 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5c805b6e59c34f7a9cd49bb781ce1e9e 
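
The "Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL" warning above is FSHLog's low-replication guard: after each sync it compares the live pipeline length against the expected replication and requests a roll when it drops. The thresholds are configurable; a sketch assuming the branch-2 FSHLog keys below (key names may vary across HBase versions):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class LowReplicationRollSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Roll the WAL once its pipeline falls below this replica count...
        conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
        // ...but stop forcing rolls after this many consecutive low-replication rolls.
        conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);
      }
    }
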
2024-12-13T17:20:07,129 DEBUG [M:0;bcc0a2319104:41247 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/49712236a271421c9c0b01076c74ab06 is 774, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1734110349698/Put/seqid=0 2024-12-13T17:20:07,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39883 is added to blk_1073741875_1060 (size=7465) 2024-12-13T17:20:07,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43667 is added to blk_1073741875_1060 (size=7465) 2024-12-13T17:20:07,135 INFO [M:0;bcc0a2319104:41247 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=39.41 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/49712236a271421c9c0b01076c74ab06 2024-12-13T17:20:07,154 DEBUG [M:0;bcc0a2319104:41247 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/00729db73e5e483abb0a5c4d2feb61f3 is 69, key is bcc0a2319104,42631,1734110349102/rs:state/1734110349157/Put/seqid=0 2024-12-13T17:20:07,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39883 is added to blk_1073741876_1061 (size=5224) 2024-12-13T17:20:07,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43667 is added to blk_1073741876_1061 (size=5224) 2024-12-13T17:20:07,159 INFO [M:0;bcc0a2319104:41247 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/00729db73e5e483abb0a5c4d2feb61f3 2024-12-13T17:20:07,169 INFO [RS:0;bcc0a2319104:43669 {}] regionserver.HRegionServer(1307): Exiting; stopping=bcc0a2319104,43669,1734110347446; zookeeper connection closed. 
2024-12-13T17:20:07,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x10020671c920001, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-13T17:20:07,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43669-0x10020671c920001, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-13T17:20:07,169 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6fb3e2da {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6fb3e2da 2024-12-13T17:20:07,169 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-12-13T17:20:07,178 DEBUG [M:0;bcc0a2319104:41247 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2cd0e2da15e146548614feb54946156d is 52, key is load_balancer_on/state:d/1734110349086/Put/seqid=0 2024-12-13T17:20:07,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39883 is added to blk_1073741877_1062 (size=5056) 2024-12-13T17:20:07,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43667 is added to blk_1073741877_1062 (size=5056) 2024-12-13T17:20:07,183 INFO [M:0;bcc0a2319104:41247 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2cd0e2da15e146548614feb54946156d 2024-12-13T17:20:07,189 DEBUG [M:0;bcc0a2319104:41247 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5c805b6e59c34f7a9cd49bb781ce1e9e as hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5c805b6e59c34f7a9cd49bb781ce1e9e 2024-12-13T17:20:07,194 INFO [M:0;bcc0a2319104:41247 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5c805b6e59c34f7a9cd49bb781ce1e9e, entries=8, sequenceid=97, filesize=5.5 K 2024-12-13T17:20:07,195 DEBUG [M:0;bcc0a2319104:41247 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/49712236a271421c9c0b01076c74ab06 as hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/49712236a271421c9c0b01076c74ab06 2024-12-13T17:20:07,201 INFO [M:0;bcc0a2319104:41247 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/49712236a271421c9c0b01076c74ab06, entries=11, sequenceid=97, filesize=7.3 K 2024-12-13T17:20:07,203 DEBUG 
[M:0;bcc0a2319104:41247 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/00729db73e5e483abb0a5c4d2feb61f3 as hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/00729db73e5e483abb0a5c4d2feb61f3 2024-12-13T17:20:07,208 INFO [M:0;bcc0a2319104:41247 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/00729db73e5e483abb0a5c4d2feb61f3, entries=2, sequenceid=97, filesize=5.1 K 2024-12-13T17:20:07,210 DEBUG [M:0;bcc0a2319104:41247 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2cd0e2da15e146548614feb54946156d as hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2cd0e2da15e146548614feb54946156d 2024-12-13T17:20:07,215 INFO [M:0;bcc0a2319104:41247 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2cd0e2da15e146548614feb54946156d, entries=1, sequenceid=97, filesize=4.9 K 2024-12-13T17:20:07,216 INFO [M:0;bcc0a2319104:41247 {}] regionserver.HRegion(3040): Finished flush of dataSize ~40.08 KB/41039, heapSize ~49.23 KB/50408, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 130ms, sequenceid=97, compaction requested=false 2024-12-13T17:20:07,218 INFO [M:0;bcc0a2319104:41247 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-13T17:20:07,218 DEBUG [M:0;bcc0a2319104:41247 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-13T17:20:07,218 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301 2024-12-13T17:20:07,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39883 is added to blk_1073741873_1058 (size=493) 2024-12-13T17:20:07,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43667 is added to blk_1073741873_1058 (size=493) 2024-12-13T17:20:07,278 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
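Each "Committing .tmp/... as ..." pair above is the usual publish-by-rename step: the flusher writes each HFile under the region's .tmp directory, then moves it into the column-family directory so readers never observe a partial file. A sketch of that step under those assumptions, using the NameNode address from the log but with the long test-data paths abbreviated:

```java
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitTmpStoreFile {
    public static void main(String[] args) throws IOException {
        // NameNode address from the log; the "..." path segments are
        // abbreviations of the run-specific test-data directories.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:34581"), new Configuration());
        Path tmp = new Path("/user/jenkins/test-data/.../store/.tmp/info/5c805b6e59c34f7a9cd49bb781ce1e9e");
        Path dst = new Path("/user/jenkins/test-data/.../store/info/5c805b6e59c34f7a9cd49bb781ce1e9e");
        // rename() is atomic on HDFS, so readers see either no file or the
        // complete flushed HFile, never a partial one.
        if (!fs.rename(tmp, dst)) {
            throw new IOException("commit failed for " + tmp);
        }
    }
}
```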
2024-12-13T17:20:07,503 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(751): hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 is not closed yet, will try archiving it next time 2024-12-13T17:20:07,504 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110377750 to hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/oldWALs/bcc0a2319104%2C41247%2C1734110347301.1734110377750 2024-12-13T17:20:07,512 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/oldWALs/bcc0a2319104%2C41247%2C1734110347301.1734110377750 to hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/oldWALs/bcc0a2319104%2C41247%2C1734110347301.1734110377750$masterlocalwal$ 2024-12-13T17:20:07,625 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:20:07,677 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:08,626 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:08,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:09,524 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-13T17:20:09,526 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:20:09,541 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:20:09,541 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:20:09,542 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:20:09,544 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:20:09,544 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:20:09,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:09,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:10,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39883 is added to blk_1073741864_1047 (size=357) 2024-12-13T17:20:10,628 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:10,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:11,631 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:11,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:12,221 ERROR [WAL-Shutdown-0 {}] wal.FSHLog(508): We have waited 5 seconds but the close of writer(s) doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-13T17:20:12,221 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-13T17:20:12,221 INFO [M:0;bcc0a2319104:41247 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-13T17:20:12,222 INFO [M:0;bcc0a2319104:41247 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:41247 2024-12-13T17:20:12,269 DEBUG [M:0;bcc0a2319104:41247 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/bcc0a2319104,41247,1734110347301 already deleted, retry=false 2024-12-13T17:20:12,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-13T17:20:12,378 INFO [M:0;bcc0a2319104:41247 {}] regionserver.HRegionServer(1307): Exiting; stopping=bcc0a2319104,41247,1734110347301; zookeeper connection closed. 2024-12-13T17:20:12,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41247-0x10020671c920000, quorum=127.0.0.1:64400, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-13T17:20:12,421 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@21fd9034{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-13T17:20:12,422 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@15b6c308{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-13T17:20:12,422 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-13T17:20:12,423 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@308286b4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-13T17:20:12,423 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@739f1c9b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/hadoop.log.dir/,STOPPED} 2024-12-13T17:20:12,426 WARN [BP-2145113159-172.17.0.2-1734110345678 heartbeating to localhost/127.0.0.1:34581 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-13T17:20:12,426 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
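The repeated "Failed invocation ... InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed" traces above all share one shape: RecoverLeaseFSUtils calls isFileClosed reflectively, and reflection wraps the callee's IOException, leaving a null message on the wrapper. A self-contained demonstration of exactly that wrapping (the stand-in method below is illustrative, not the DFSClient signature):

```java
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class UnwrapInvocationTarget {
    // Stand-in for an isFileClosed-style call once the client is closed.
    public boolean isFileClosed(String path) throws IOException {
        throw new IOException("Filesystem closed");
    }

    public static void main(String[] args) throws Exception {
        UnwrapInvocationTarget target = new UnwrapInvocationTarget();
        Method m = UnwrapInvocationTarget.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(target, "/any/wal/path");
        } catch (InvocationTargetException e) {
            // Reflection wraps the callee's exception, which is why the log
            // shows "InvocationTargetException: null ... Caused by: IOException".
            System.out.println("wrapped cause: " + e.getCause());
        }
    }
}
```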
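The ERROR above also names its own escape hatch: the bounded writer-close wait (5 seconds in this run) is controlled by "hbase.wal.fshlog.wait.on.shutdown.seconds". A minimal sketch of raising it, where the key is quoted directly from the message and the 15-second value is only an example:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalShutdownWait {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Key quoted from the ERROR line above; 15 is an example value.
        conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 15);
        System.out.println(conf.getInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 5));
    }
}
```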
2024-12-13T17:20:12,426 WARN [BP-2145113159-172.17.0.2-1734110345678 heartbeating to localhost/127.0.0.1:34581 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2145113159-172.17.0.2-1734110345678 (Datanode Uuid 8217e5cd-a926-4627-9cc3-4b6ed64b2cf6) service to localhost/127.0.0.1:34581 2024-12-13T17:20:12,426 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-13T17:20:12,427 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data3/current/BP-2145113159-172.17.0.2-1734110345678 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-13T17:20:12,428 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data4/current/BP-2145113159-172.17.0.2-1734110345678 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-13T17:20:12,428 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-13T17:20:12,431 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@310c7b0{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-13T17:20:12,432 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@64cbae84{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-13T17:20:12,432 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-13T17:20:12,432 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65d69bd5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-13T17:20:12,432 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5280103b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/hadoop.log.dir/,STOPPED} 2024-12-13T17:20:12,433 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
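The "Thread Interrupted waiting to refresh disk information: sleep interrupted" warnings above come from per-volume disk-usage refresh threads being interrupted mid-sleep as each DataNode stops; the interrupt is the shutdown signal, not a failure. A generic sketch of that loop shape (the names here are illustrative, not the CachingGetSpaceUsed internals):

```java
// Illustrative refresh-thread pattern: treat an interrupt during sleep
// as an orderly shutdown request and exit quietly.
final class RefreshLoop implements Runnable {
    private final long refreshIntervalMs;

    RefreshLoop(long refreshIntervalMs) {
        this.refreshIntervalMs = refreshIntervalMs;
    }

    @Override
    public void run() {
        while (!Thread.currentThread().isInterrupted()) {
            refreshDiskUsage();
            try {
                Thread.sleep(refreshIntervalMs);
            } catch (InterruptedException e) {
                // Matches the log: the datanode is stopping, so stop refreshing.
                Thread.currentThread().interrupt();
                return;
            }
        }
    }

    private void refreshDiskUsage() {
        // du/df style refresh elided in this sketch.
    }
}
```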
2024-12-13T17:20:12,433 WARN [BP-2145113159-172.17.0.2-1734110345678 heartbeating to localhost/127.0.0.1:34581 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-13T17:20:12,433 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-13T17:20:12,433 WARN [BP-2145113159-172.17.0.2-1734110345678 heartbeating to localhost/127.0.0.1:34581 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2145113159-172.17.0.2-1734110345678 (Datanode Uuid 61dfa4d7-b9d9-4b14-9600-d429686ff6c1) service to localhost/127.0.0.1:34581 2024-12-13T17:20:12,434 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data5/current/BP-2145113159-172.17.0.2-1734110345678 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-13T17:20:12,434 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/cluster_afdd3c31-99f8-d5e1-0851-da26dbc72240/dfs/data/data6/current/BP-2145113159-172.17.0.2-1734110345678 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-13T17:20:12,434 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-13T17:20:12,440 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@410de187{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-13T17:20:12,440 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3a289a6d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-13T17:20:12,440 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-13T17:20:12,440 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d3cdd43{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-13T17:20:12,441 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3a9f818b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/hadoop.log.dir/,STOPPED} 2024-12-13T17:20:12,447 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-13T17:20:12,474 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-13T17:20:12,480 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=86 (was 66) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-16-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1140069844) connection to localhost/127.0.0.1:34581 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34581 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:34581 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34581 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$$Lambda$797/0x00007fac0cb826f8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-11-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-11-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) 
app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$$Lambda$797/0x00007fac0cb826f8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-10-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-11-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34581 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:34581 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-16-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34581 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (1140069844) connection to localhost/127.0.0.1:34581 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: RS-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-17-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RS-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: Abort regionserver monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Potentially hanging thread: RS-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$$Lambda$797/0x00007fac0cb826f8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RS-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Potentially hanging thread: nioEventLoopGroup-10-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RS-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-17-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (1140069844) connection to localhost/127.0.0.1:34581 from jenkins.hfs.1 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: RS-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-10-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
- Thread LEAK? -, OpenFileDescriptor=410 (was 404) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=114 (was 219), ProcessCount=11 (was 11), AvailableMemoryMB=3706 (was 4366) 2024-12-13T17:20:12,486 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=86, OpenFileDescriptor=410, MaxFileDescriptor=1048576, SystemLoadAverage=114, ProcessCount=11, AvailableMemoryMB=3706 2024-12-13T17:20:12,486 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-13T17:20:12,486 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/hadoop.log.dir so I do NOT create it in target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b 2024-12-13T17:20:12,486 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/f4f30a41-d59d-e2ea-a952-0a09ca249751/hadoop.tmp.dir so I do NOT create it in target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b 2024-12-13T17:20:12,486 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/cluster_1bbe54eb-58a5-af1a-0866-29993c667105, deleteOnExit=true 2024-12-13T17:20:12,486 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-13T17:20:12,487 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/test.cache.data in system properties and HBase conf 2024-12-13T17:20:12,487 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/hadoop.tmp.dir in system properties and HBase conf 2024-12-13T17:20:12,487 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/hadoop.log.dir in system properties and HBase conf 2024-12-13T17:20:12,487 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-13T17:20:12,487 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-13T17:20:12,487 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-13T17:20:12,487 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-13T17:20:12,487 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-13T17:20:12,487 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-13T17:20:12,487 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-13T17:20:12,488 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-13T17:20:12,488 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-13T17:20:12,488 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-13T17:20:12,488 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-13T17:20:12,488 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-13T17:20:12,488 INFO [Time-limited test {}] 
hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-13T17:20:12,488 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/nfs.dump.dir in system properties and HBase conf 2024-12-13T17:20:12,488 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/java.io.tmpdir in system properties and HBase conf 2024-12-13T17:20:12,488 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-13T17:20:12,488 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-13T17:20:12,488 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-13T17:20:12,498 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-13T17:20:12,632 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:12,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:12,789 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T17:20:12,794 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-13T17:20:12,795 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-13T17:20:12,795 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-13T17:20:12,795 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-13T17:20:12,798 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T17:20:12,799 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@13ad263a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/hadoop.log.dir/,AVAILABLE} 2024-12-13T17:20:12,799 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@37b4f391{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-13T17:20:12,813 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:12,886 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2be233f1{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/java.io.tmpdir/jetty-localhost-39549-hadoop-hdfs-3_4_1-tests_jar-_-any-15284729392058064014/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-13T17:20:12,887 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6a2091a3{HTTP/1.1, (http/1.1)}{localhost:39549} 2024-12-13T17:20:12,887 INFO [Time-limited test {}] server.Server(415): Started @185614ms 2024-12-13T17:20:12,897 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-13T17:20:13,126 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T17:20:13,128 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-13T17:20:13,129 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-13T17:20:13,129 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-13T17:20:13,129 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-13T17:20:13,130 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@36b4a933{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/hadoop.log.dir/,AVAILABLE} 2024-12-13T17:20:13,130 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@76f2351a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-13T17:20:13,219 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3684beb3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/java.io.tmpdir/jetty-localhost-42771-hadoop-hdfs-3_4_1-tests_jar-_-any-9415031855560436055/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-13T17:20:13,220 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6cdd29e5{HTTP/1.1, (http/1.1)}{localhost:42771} 2024-12-13T17:20:13,220 INFO [Time-limited test {}] server.Server(415): Started @185947ms 2024-12-13T17:20:13,221 WARN [Time-limited test {}] 
web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-13T17:20:13,243 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T17:20:13,246 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-13T17:20:13,247 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-13T17:20:13,247 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-13T17:20:13,247 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-13T17:20:13,247 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@55558130{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/hadoop.log.dir/,AVAILABLE} 2024-12-13T17:20:13,248 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@365421ee{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-13T17:20:13,337 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@cc0feb2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/java.io.tmpdir/jetty-localhost-40113-hadoop-hdfs-3_4_1-tests_jar-_-any-6234984367107766171/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-13T17:20:13,337 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a4cb381{HTTP/1.1, (http/1.1)}{localhost:40113} 2024-12-13T17:20:13,337 INFO [Time-limited test {}] server.Server(415): Started @186065ms 2024-12-13T17:20:13,338 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-13T17:20:13,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:13,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:20:13,814 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:13,826 WARN [Thread-961 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/cluster_1bbe54eb-58a5-af1a-0866-29993c667105/dfs/data/data2/current/BP-602853586-172.17.0.2-1734110412509/current, will proceed with Du for space computation calculation, 2024-12-13T17:20:13,826 WARN [Thread-960 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/cluster_1bbe54eb-58a5-af1a-0866-29993c667105/dfs/data/data1/current/BP-602853586-172.17.0.2-1734110412509/current, will proceed with Du for space computation calculation, 2024-12-13T17:20:13,840 WARN [Thread-924 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-13T17:20:13,842 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x756793ed294090d1 with lease ID 0x3252334d4d477a32: Processing first storage report for DS-cbbe8f95-9bca-4b53-94b2-db13fa46e4ac from datanode DatanodeRegistration(127.0.0.1:40907, datanodeUuid=ebc35a20-901e-4bc3-a0b9-a60a36b7f088, infoPort=46529, infoSecurePort=0, ipcPort=35957, storageInfo=lv=-57;cid=testClusterID;nsid=2046495365;c=1734110412509) 2024-12-13T17:20:13,843 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x756793ed294090d1 with lease ID 0x3252334d4d477a32: from storage DS-cbbe8f95-9bca-4b53-94b2-db13fa46e4ac node DatanodeRegistration(127.0.0.1:40907, datanodeUuid=ebc35a20-901e-4bc3-a0b9-a60a36b7f088, infoPort=46529, infoSecurePort=0, ipcPort=35957, storageInfo=lv=-57;cid=testClusterID;nsid=2046495365;c=1734110412509), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-13T17:20:13,843 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x756793ed294090d1 with lease ID 0x3252334d4d477a32: Processing first storage report for DS-42aa5d66-ccc0-4dd3-a06e-69160aa2078f from datanode DatanodeRegistration(127.0.0.1:40907, datanodeUuid=ebc35a20-901e-4bc3-a0b9-a60a36b7f088, infoPort=46529, infoSecurePort=0, ipcPort=35957, storageInfo=lv=-57;cid=testClusterID;nsid=2046495365;c=1734110412509) 2024-12-13T17:20:13,843 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x756793ed294090d1 with lease ID 0x3252334d4d477a32: from storage DS-42aa5d66-ccc0-4dd3-a06e-69160aa2078f node DatanodeRegistration(127.0.0.1:40907, datanodeUuid=ebc35a20-901e-4bc3-a0b9-a60a36b7f088, infoPort=46529, infoSecurePort=0, ipcPort=35957, storageInfo=lv=-57;cid=testClusterID;nsid=2046495365;c=1734110412509), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-13T17:20:13,962 WARN [Thread-972 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/cluster_1bbe54eb-58a5-af1a-0866-29993c667105/dfs/data/data4/current/BP-602853586-172.17.0.2-1734110412509/current, will proceed with Du for space computation calculation, 2024-12-13T17:20:13,962 WARN [Thread-971 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/cluster_1bbe54eb-58a5-af1a-0866-29993c667105/dfs/data/data3/current/BP-602853586-172.17.0.2-1734110412509/current, will proceed with Du for space computation calculation, 2024-12-13T17:20:13,979 WARN [Thread-947 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-13T17:20:13,981 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe624ea255602b332 with lease ID 0x3252334d4d477a33: Processing first storage report for DS-00efdfd4-455c-42c6-87d0-80262b2540ef from datanode DatanodeRegistration(127.0.0.1:44343, datanodeUuid=cd7ef08b-e07d-4d0d-8298-8bc1e8733972, infoPort=34611, infoSecurePort=0, ipcPort=38617, storageInfo=lv=-57;cid=testClusterID;nsid=2046495365;c=1734110412509) 2024-12-13T17:20:13,982 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe624ea255602b332 with lease ID 0x3252334d4d477a33: from storage DS-00efdfd4-455c-42c6-87d0-80262b2540ef node DatanodeRegistration(127.0.0.1:44343, datanodeUuid=cd7ef08b-e07d-4d0d-8298-8bc1e8733972, infoPort=34611, infoSecurePort=0, ipcPort=38617, storageInfo=lv=-57;cid=testClusterID;nsid=2046495365;c=1734110412509), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-13T17:20:13,982 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe624ea255602b332 with lease ID 0x3252334d4d477a33: Processing first storage report for DS-e57a1a5d-c246-461a-bbaa-fbab323a2ffc from datanode DatanodeRegistration(127.0.0.1:44343, datanodeUuid=cd7ef08b-e07d-4d0d-8298-8bc1e8733972, infoPort=34611, infoSecurePort=0, ipcPort=38617, storageInfo=lv=-57;cid=testClusterID;nsid=2046495365;c=1734110412509) 2024-12-13T17:20:13,982 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe624ea255602b332 with lease ID 0x3252334d4d477a33: from storage DS-e57a1a5d-c246-461a-bbaa-fbab323a2ffc node DatanodeRegistration(127.0.0.1:44343, datanodeUuid=cd7ef08b-e07d-4d0d-8298-8bc1e8733972, infoPort=34611, infoSecurePort=0, ipcPort=38617, storageInfo=lv=-57;cid=testClusterID;nsid=2046495365;c=1734110412509), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-13T17:20:14,019 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-13T17:20:14,067 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b 2024-12-13T17:20:14,072 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/cluster_1bbe54eb-58a5-af1a-0866-29993c667105/zookeeper_0, clientPort=57192, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/cluster_1bbe54eb-58a5-af1a-0866-29993c667105/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/cluster_1bbe54eb-58a5-af1a-0866-29993c667105/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-13T17:20:14,073 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=57192 2024-12-13T17:20:14,074 INFO [Time-limited test 
{}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T17:20:14,076 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T17:20:14,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44343 is added to blk_1073741825_1001 (size=7) 2024-12-13T17:20:14,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741825_1001 (size=7) 2024-12-13T17:20:14,088 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1 with version=8 2024-12-13T17:20:14,088 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/hbase-staging 2024-12-13T17:20:14,089 INFO [Time-limited test {}] client.ConnectionUtils(129): master/bcc0a2319104:0 server-side Connection retries=45 2024-12-13T17:20:14,090 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-13T17:20:14,090 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-13T17:20:14,090 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-13T17:20:14,090 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-13T17:20:14,090 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-13T17:20:14,090 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-13T17:20:14,090 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-13T17:20:14,090 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:38535 2024-12-13T17:20:14,091 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T17:20:14,092 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T17:20:14,094 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process 
identifier=master:38535 connecting to ZooKeeper ensemble=127.0.0.1:57192 2024-12-13T17:20:14,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:385350x0, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-13T17:20:14,141 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38535-0x100206821760000 connected 2024-12-13T17:20:14,211 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-13T17:20:14,213 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T17:20:14,215 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-13T17:20:14,216 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38535 2024-12-13T17:20:14,216 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38535 2024-12-13T17:20:14,217 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38535 2024-12-13T17:20:14,218 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38535 2024-12-13T17:20:14,218 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38535 2024-12-13T17:20:14,219 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1, hbase.cluster.distributed=false 2024-12-13T17:20:14,237 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/bcc0a2319104:0 server-side Connection retries=45 2024-12-13T17:20:14,237 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-13T17:20:14,237 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-13T17:20:14,237 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-13T17:20:14,237 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-13T17:20:14,237 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-13T17:20:14,237 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 
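
The ZKUtil entries just above ("Set watcher on znode that does not yet exist, /hbase/master") record a standard ZooKeeper idiom: exists() registers a watch even when the znode is absent, so the client is notified with a NodeCreated event the moment another process creates it. A minimal sketch of that idiom using the plain Apache ZooKeeper client; the ensemble address and timeout are copied from the log, while the class name and printouts are illustrative only:

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class WatchBeforeCreate {
    public static void main(String[] args) throws Exception {
        // Ensemble address and 30000 ms session timeout mirror the log above.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:57192", 30000,
            (WatchedEvent e) -> System.out.println("session event: " + e));

        // exists() on an absent znode returns null but still registers the
        // watcher, so a NodeCreated event fires as soon as some other client
        // (e.g. the master) creates /hbase/master.
        Watcher masterWatch =
            (WatchedEvent e) -> System.out.println("znode event: " + e);
        Stat stat = zk.exists("/hbase/master", masterWatch);
        System.out.println("stat now: " + stat); // null until the znode exists

        Thread.sleep(60_000); // keep the session open long enough to observe it
        zk.close();
    }
}
```

This watch-before-create call is what lets the regionserver above track /hbase/master before any master has registered there.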
2024-12-13T17:20:14,237 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-13T17:20:14,238 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:36927 2024-12-13T17:20:14,238 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-13T17:20:14,239 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-13T17:20:14,239 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T17:20:14,241 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T17:20:14,243 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:36927 connecting to ZooKeeper ensemble=127.0.0.1:57192 2024-12-13T17:20:14,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:369270x0, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-13T17:20:14,252 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36927-0x100206821760001 connected 2024-12-13T17:20:14,252 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36927-0x100206821760001, quorum=127.0.0.1:57192, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-13T17:20:14,253 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36927-0x100206821760001, quorum=127.0.0.1:57192, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T17:20:14,254 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36927-0x100206821760001, quorum=127.0.0.1:57192, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-13T17:20:14,254 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36927 2024-12-13T17:20:14,254 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36927 2024-12-13T17:20:14,255 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36927 2024-12-13T17:20:14,255 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36927 2024-12-13T17:20:14,255 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36927 2024-12-13T17:20:14,256 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/bcc0a2319104,38535,1734110414089 2024-12-13T17:20:14,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-13T17:20:14,264 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:36927-0x100206821760001, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-13T17:20:14,265 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/bcc0a2319104,38535,1734110414089 2024-12-13T17:20:14,268 DEBUG [M:0;bcc0a2319104:38535 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;bcc0a2319104:38535 2024-12-13T17:20:14,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-13T17:20:14,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36927-0x100206821760001, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-13T17:20:14,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:20:14,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36927-0x100206821760001, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:20:14,273 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-13T17:20:14,273 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/bcc0a2319104,38535,1734110414089 from backup master directory 2024-12-13T17:20:14,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/bcc0a2319104,38535,1734110414089 2024-12-13T17:20:14,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36927-0x100206821760001, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-13T17:20:14,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-13T17:20:14,281 WARN [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
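
The ActiveMasterManager entries around this point (adding a znode under /hbase/backup-masters, then deleting it once /hbase/master is claimed) follow the usual ZooKeeper ephemeral-node election pattern: whoever creates the ephemeral /hbase/master znode first is active, and its automatic removal on session loss is what lets a backup take over. The sketch below is a simplified, hypothetical stand-in for that dance, not the actual ActiveMasterManager code; only the znode paths are taken from the log:

```java
import java.nio.charset.StandardCharsets;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class MasterElectionSketch {
    /** Try to become the active master; returns true if we won the race. */
    static boolean tryBecomeActive(ZooKeeper zk, String serverName) throws Exception {
        byte[] data = serverName.getBytes(StandardCharsets.UTF_8);
        try {
            // EPHEMERAL: the znode vanishes automatically if our session dies,
            // which is what allows failover to a backup master.
            zk.create("/hbase/master", data,
                ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
            // We are active; drop our entry from the backup-masters directory,
            // exactly the deletion the log records above.
            zk.delete("/hbase/backup-masters/" + serverName, -1);
            return true;
        } catch (KeeperException.NodeExistsException e) {
            // Someone else is active; stay registered as a backup and watch
            // /hbase/master for a NodeDeleted event before retrying.
            return false;
        }
    }
}
```

In the real system the loser keeps its backup-masters znode and a watch on /hbase/master, which is why the NodeDeleted and NodeChildrenChanged events above fire on both sessions.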
2024-12-13T17:20:14,281 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=bcc0a2319104,38535,1734110414089
2024-12-13T17:20:14,281 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-13T17:20:14,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741826_1002 (size=42)
2024-12-13T17:20:14,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44343 is added to blk_1073741826_1002 (size=42)
2024-12-13T17:20:14,294 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/hbase.id with ID: 3fa4ab65-ef40-4467-8e9f-969ca875d991
2024-12-13T17:20:14,304 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-13T17:20:14,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:20:14,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36927-0x100206821760001, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:20:14,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741827_1003 (size=196)
2024-12-13T17:20:14,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44343 is added to blk_1073741827_1003 (size=196)
2024-12-13T17:20:14,322 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-13T17:20:14,322 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-13T17:20:14,323 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-13T17:20:14,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44343 is added to blk_1073741828_1004 (size=1189)
2024-12-13T17:20:14,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741828_1004 (size=1189)
2024-12-13T17:20:14,332 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/data/master/store
2024-12-13T17:20:14,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44343 is added to blk_1073741829_1005 (size=34)
2024-12-13T17:20:14,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741829_1005 (size=34)
2024-12-13T17:20:14,339 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:20:14,339 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-13T17:20:14,339 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-13T17:20:14,339 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-13T17:20:14,339 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-13T17:20:14,340 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-13T17:20:14,340 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-13T17:20:14,340 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-13T17:20:14,341 WARN [master/bcc0a2319104:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/data/master/store/.initializing
2024-12-13T17:20:14,341 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/WALs/bcc0a2319104,38535,1734110414089
2024-12-13T17:20:14,344 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bcc0a2319104%2C38535%2C1734110414089, suffix=, logDir=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/WALs/bcc0a2319104,38535,1734110414089, archiveDir=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/oldWALs, maxLogs=10
2024-12-13T17:20:14,344 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C38535%2C1734110414089.1734110414344
2024-12-13T17:20:14,351 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/WALs/bcc0a2319104,38535,1734110414089/bcc0a2319104%2C38535%2C1734110414089.1734110414344
2024-12-13T17:20:14,352 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46529:46529),(127.0.0.1/127.0.0.1:34611:34611)]
2024-12-13T17:20:14,352 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-12-13T17:20:14,352 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:20:14,352 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-12-13T17:20:14,352 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-12-13T17:20:14,353 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-12-13T17:20:14,355 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-12-13T17:20:14,355 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:20:14,356 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-13T17:20:14,356 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-12-13T17:20:14,357 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-12-13T17:20:14,358 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:20:14,358 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-13T17:20:14,358 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-12-13T17:20:14,360 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-12-13T17:20:14,360 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:20:14,360 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-13T17:20:14,361 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-12-13T17:20:14,362 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-12-13T17:20:14,362 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:20:14,363 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-13T17:20:14,364 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-13T17:20:14,364 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-13T17:20:14,365 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-12-13T17:20:14,366 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-12-13T17:20:14,368 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-13T17:20:14,369 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=850876, jitterRate=0.08194512128829956}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-12-13T17:20:14,369 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-13T17:20:14,369 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-12-13T17:20:14,372 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70fa436d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-13T17:20:14,373 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating...
2024-12-13T17:20:14,373 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-12-13T17:20:14,373 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-12-13T17:20:14,374 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-12-13T17:20:14,374 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec
2024-12-13T17:20:14,374 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec
2024-12-13T17:20:14,374 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-12-13T17:20:14,376 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
2024-12-13T17:20:14,377 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-12-13T17:20:14,385 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false
2024-12-13T17:20:14,386 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-12-13T17:20:14,386 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-12-13T17:20:14,393 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false
2024-12-13T17:20:14,394 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-12-13T17:20:14,395 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-12-13T17:20:14,402 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false
2024-12-13T17:20:14,403 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-12-13T17:20:14,410 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false
2024-12-13T17:20:14,412 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-12-13T17:20:14,422 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-12-13T17:20:14,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-13T17:20:14,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36927-0x100206821760001, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-13T17:20:14,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36927-0x100206821760001, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:20:14,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:20:14,431 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=bcc0a2319104,38535,1734110414089, sessionid=0x100206821760000, setting cluster-up flag (Was=false)
2024-12-13T17:20:14,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:20:14,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36927-0x100206821760001, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:20:14,472 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-12-13T17:20:14,474 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bcc0a2319104,38535,1734110414089
2024-12-13T17:20:14,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:20:14,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36927-0x100206821760001, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:20:14,514 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-12-13T17:20:14,516 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bcc0a2319104,38535,1734110414089
2024-12-13T17:20:14,521 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta
2024-12-13T17:20:14,522 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2
2024-12-13T17:20:14,522 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-12-13T17:20:14,522 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: bcc0a2319104,38535,1734110414089 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
2024-12-13T17:20:14,523 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/bcc0a2319104:0, corePoolSize=5, maxPoolSize=5
2024-12-13T17:20:14,523 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/bcc0a2319104:0, corePoolSize=5, maxPoolSize=5
2024-12-13T17:20:14,523 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/bcc0a2319104:0, corePoolSize=5, maxPoolSize=5
2024-12-13T17:20:14,523 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/bcc0a2319104:0, corePoolSize=5, maxPoolSize=5
2024-12-13T17:20:14,523 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/bcc0a2319104:0, corePoolSize=10, maxPoolSize=10
2024-12-13T17:20:14,523 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:20:14,523 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/bcc0a2319104:0, corePoolSize=2, maxPoolSize=2
2024-12-13T17:20:14,524 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:20:14,525 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1734110444525
2024-12-13T17:20:14,525 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-12-13T17:20:14,525 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-12-13T17:20:14,525 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-12-13T17:20:14,525 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-12-13T17:20:14,525 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-12-13T17:20:14,526 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-12-13T17:20:14,526 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta
2024-12-13T17:20:14,526 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-13T17:20:14,526 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region
2024-12-13T17:20:14,526 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-12-13T17:20:14,526 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-12-13T17:20:14,526 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-12-13T17:20:14,527 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-12-13T17:20:14,527 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-12-13T17:20:14,527 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.large.0-1734110414527,5,FailOnTimeoutGroup]
2024-12-13T17:20:14,527 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.small.0-1734110414527,5,FailOnTimeoutGroup]
2024-12-13T17:20:14,527 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-13T17:20:14,527 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:20:14,527 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-12-13T17:20:14,527 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-12-13T17:20:14,527 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
2024-12-13T17:20:14,527 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-13T17:20:14,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741831_1007 (size=1039)
2024-12-13T17:20:14,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44343 is added to blk_1073741831_1007 (size=1039)
2024-12-13T17:20:14,536 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039
2024-12-13T17:20:14,536 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1
2024-12-13T17:20:14,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44343 is added to blk_1073741832_1008 (size=32)
2024-12-13T17:20:14,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741832_1008 (size=32)
2024-12-13T17:20:14,544 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:20:14,546 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-13T17:20:14,547 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-13T17:20:14,547 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:20:14,548 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-13T17:20:14,548 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-13T17:20:14,550 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-13T17:20:14,550 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:20:14,550 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-13T17:20:14,550 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-13T17:20:14,552 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-13T17:20:14,552 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:20:14,552 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-13T17:20:14,553 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/hbase/meta/1588230740
2024-12-13T17:20:14,554 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/hbase/meta/1588230740
2024-12-13T17:20:14,556 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-13T17:20:14,557 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-13T17:20:14,559 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-13T17:20:14,560 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=741885, jitterRate=-0.056644588708877563}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-13T17:20:14,560 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-12-13T17:20:14,560 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-13T17:20:14,560 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-13T17:20:14,560 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-13T17:20:14,560 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-13T17:20:14,560 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-13T17:20:14,560 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-13T17:20:14,560 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-13T17:20:14,561 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta
2024-12-13T17:20:14,561 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta
2024-12-13T17:20:14,561 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-12-13T17:20:14,562 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-13T17:20:14,563 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-12-13T17:20:14,567 DEBUG [RS:0;bcc0a2319104:36927 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;bcc0a2319104:36927
2024-12-13T17:20:14,568 INFO [RS:0;bcc0a2319104:36927 {}] regionserver.HRegionServer(1008): ClusterId : 3fa4ab65-ef40-4467-8e9f-969ca875d991
2024-12-13T17:20:14,568 DEBUG [RS:0;bcc0a2319104:36927 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-12-13T17:20:14,578 DEBUG [RS:0;bcc0a2319104:36927 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-12-13T17:20:14,578 DEBUG [RS:0;bcc0a2319104:36927 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-12-13T17:20:14,586 DEBUG [RS:0;bcc0a2319104:36927 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-12-13T17:20:14,586 DEBUG [RS:0;bcc0a2319104:36927 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d43e018, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-13T17:20:14,587 DEBUG [RS:0;bcc0a2319104:36927 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@431c25dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bcc0a2319104/172.17.0.2:0
2024-12-13T17:20:14,587 INFO [RS:0;bcc0a2319104:36927 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled
2024-12-13T17:20:14,587 INFO [RS:0;bcc0a2319104:36927 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled
2024-12-13T17:20:14,587 DEBUG [RS:0;bcc0a2319104:36927 {}] regionserver.HRegionServer(1090): About to register with Master.
2024-12-13T17:20:14,587 INFO [RS:0;bcc0a2319104:36927 {}] regionserver.HRegionServer(3073): reportForDuty to master=bcc0a2319104,38535,1734110414089 with isa=bcc0a2319104/172.17.0.2:36927, startcode=1734110414236
2024-12-13T17:20:14,587 DEBUG [RS:0;bcc0a2319104:36927 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-13T17:20:14,589 INFO [RS-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45917, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService
2024-12-13T17:20:14,589 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38535 {}] master.ServerManager(332): Checking decommissioned status of RegionServer bcc0a2319104,36927,1734110414236
2024-12-13T17:20:14,590 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38535 {}] master.ServerManager(486): Registering regionserver=bcc0a2319104,36927,1734110414236
2024-12-13T17:20:14,591 DEBUG [RS:0;bcc0a2319104:36927 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1
2024-12-13T17:20:14,591 DEBUG [RS:0;bcc0a2319104:36927 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:42927
2024-12-13T17:20:14,591 DEBUG [RS:0;bcc0a2319104:36927 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1
2024-12-13T17:20:14,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-13T17:20:14,598 DEBUG [RS:0;bcc0a2319104:36927 {}] zookeeper.ZKUtil(111): regionserver:36927-0x100206821760001, quorum=127.0.0.1:57192, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/bcc0a2319104,36927,1734110414236
2024-12-13T17:20:14,598 WARN [RS:0;bcc0a2319104:36927 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-13T17:20:14,598 INFO [RS:0;bcc0a2319104:36927 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-13T17:20:14,598 DEBUG [RS:0;bcc0a2319104:36927 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236
2024-12-13T17:20:14,598 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [bcc0a2319104,36927,1734110414236]
2024-12-13T17:20:14,603 DEBUG [RS:0;bcc0a2319104:36927 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds
2024-12-13T17:20:14,603 INFO [RS:0;bcc0a2319104:36927 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-12-13T17:20:14,605 INFO [RS:0;bcc0a2319104:36927 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-12-13T17:20:14,605 INFO [RS:0;bcc0a2319104:36927 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-13T17:20:14,605 INFO [RS:0;bcc0a2319104:36927 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-13T17:20:14,606 INFO [RS:0;bcc0a2319104:36927 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S
2024-12-13T17:20:14,607 INFO [RS:0;bcc0a2319104:36927 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-12-13T17:20:14,607 DEBUG [RS:0;bcc0a2319104:36927 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:20:14,607 DEBUG [RS:0;bcc0a2319104:36927 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:20:14,607 DEBUG [RS:0;bcc0a2319104:36927 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:20:14,607 DEBUG [RS:0;bcc0a2319104:36927 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:20:14,607 DEBUG [RS:0;bcc0a2319104:36927 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:20:14,607 DEBUG [RS:0;bcc0a2319104:36927 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/bcc0a2319104:0, corePoolSize=2, maxPoolSize=2
2024-12-13T17:20:14,607 DEBUG [RS:0;bcc0a2319104:36927 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:20:14,607 DEBUG [RS:0;bcc0a2319104:36927 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:20:14,608 DEBUG [RS:0;bcc0a2319104:36927 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:20:14,608 DEBUG [RS:0;bcc0a2319104:36927 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:20:14,608 DEBUG [RS:0;bcc0a2319104:36927 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:20:14,608 DEBUG [RS:0;bcc0a2319104:36927 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/bcc0a2319104:0, corePoolSize=3, maxPoolSize=3
2024-12-13T17:20:14,608 DEBUG [RS:0;bcc0a2319104:36927 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0, corePoolSize=3, maxPoolSize=3
2024-12-13T17:20:14,608 INFO [RS:0;bcc0a2319104:36927 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-12-13T17:20:14,609 INFO [RS:0;bcc0a2319104:36927 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-12-13T17:20:14,609 INFO [RS:0;bcc0a2319104:36927 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-12-13T17:20:14,609 INFO [RS:0;bcc0a2319104:36927 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-12-13T17:20:14,609 INFO [RS:0;bcc0a2319104:36927 {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,36927,1734110414236-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-13T17:20:14,623 INFO [RS:0;bcc0a2319104:36927 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-12-13T17:20:14,623 INFO [RS:0;bcc0a2319104:36927 {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,36927,1734110414236-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-13T17:20:14,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:20:14,634 INFO [RS:0;bcc0a2319104:36927 {}] regionserver.Replication(204): bcc0a2319104,36927,1734110414236 started
2024-12-13T17:20:14,634 INFO [RS:0;bcc0a2319104:36927 {}] regionserver.HRegionServer(1767): Serving as bcc0a2319104,36927,1734110414236, RpcServer on bcc0a2319104/172.17.0.2:36927, sessionid=0x100206821760001
2024-12-13T17:20:14,634 DEBUG [RS:0;bcc0a2319104:36927 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-12-13T17:20:14,634 DEBUG [RS:0;bcc0a2319104:36927 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager bcc0a2319104,36927,1734110414236
2024-12-13T17:20:14,634 DEBUG [RS:0;bcc0a2319104:36927 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bcc0a2319104,36927,1734110414236'
2024-12-13T17:20:14,634 DEBUG [RS:0;bcc0a2319104:36927 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-12-13T17:20:14,635 DEBUG [RS:0;bcc0a2319104:36927 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-12-13T17:20:14,635 DEBUG [RS:0;bcc0a2319104:36927 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-12-13T17:20:14,635 DEBUG [RS:0;bcc0a2319104:36927 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-12-13T17:20:14,635 DEBUG [RS:0;bcc0a2319104:36927 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager bcc0a2319104,36927,1734110414236
2024-12-13T17:20:14,635 DEBUG [RS:0;bcc0a2319104:36927 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bcc0a2319104,36927,1734110414236'
2024-12-13T17:20:14,636 DEBUG [RS:0;bcc0a2319104:36927 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-12-13T17:20:14,636 DEBUG [RS:0;bcc0a2319104:36927 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-13T17:20:14,636 DEBUG [RS:0;bcc0a2319104:36927 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-13T17:20:14,636 INFO [RS:0;bcc0a2319104:36927 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-13T17:20:14,636 INFO [RS:0;bcc0a2319104:36927 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-12-13T17:20:14,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:20:14,714 WARN [bcc0a2319104:38535 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions.
2024-12-13T17:20:14,741 INFO [RS:0;bcc0a2319104:36927 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bcc0a2319104%2C36927%2C1734110414236, suffix=, logDir=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236, archiveDir=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/oldWALs, maxLogs=32
2024-12-13T17:20:14,743 INFO [RS:0;bcc0a2319104:36927 {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C36927%2C1734110414236.1734110414743
2024-12-13T17:20:14,751 INFO [RS:0;bcc0a2319104:36927 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110414743
2024-12-13T17:20:14,751 DEBUG [RS:0;bcc0a2319104:36927 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46529:46529),(127.0.0.1/127.0.0.1:34611:34611)]
2024-12-13T17:20:14,815 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:20:14,964 DEBUG [bcc0a2319104:38535 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-12-13T17:20:14,965 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=bcc0a2319104,36927,1734110414236
2024-12-13T17:20:14,968 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bcc0a2319104,36927,1734110414236, state=OPENING
2024-12-13T17:20:15,011 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-12-13T17:20:15,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:20:15,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36927-0x100206821760001, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:20:15,021 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=bcc0a2319104,36927,1734110414236}]
2024-12-13T17:20:15,021 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-13T17:20:15,021 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-13T17:20:15,177 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bcc0a2319104,36927,1734110414236
2024-12-13T17:20:15,177 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-13T17:20:15,182 INFO [RS-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55728, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-13T17:20:15,188 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740
2024-12-13T17:20:15,189 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-13T17:20:15,191 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bcc0a2319104%2C36927%2C1734110414236.meta, suffix=.meta, logDir=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236, archiveDir=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/oldWALs, maxLogs=32
2024-12-13T17:20:15,193 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C36927%2C1734110414236.meta.1734110415193.meta
2024-12-13T17:20:15,200 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.meta.1734110415193.meta
2024-12-13T17:20:15,201 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34611:34611),(127.0.0.1/127.0.0.1:46529:46529)]
2024-12-13T17:20:15,201 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-12-13T17:20:15,201 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-12-13T17:20:15,201 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-12-13T17:20:15,201 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
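
The Close-WAL-Writer-0 WARNs recurring through this section all have the same shape: the paths point at the WALs of the previous minicluster instance (hdfs://localhost:34581, servers with 1734110347xxx start codes), whose DFSClient has evidently already been shut down, so each lease-recovery probe fails with a wrapped "Filesystem closed" IOException. The frames show the probe calling DistributedFileSystem#isFileClosed through reflection; a condensed sketch of that pattern follows (illustrative, not the RecoverLeaseFSUtils source):

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class IsFileClosedProbe {
      // isFileClosed is not on the FileSystem base class, so it is looked up
      // reflectively on the concrete class. An InvocationTargetException wraps
      // the real failure; its cause is what the log prints under "Caused by:",
      // here the "Filesystem closed" IOException from a shut-down DFSClient.
      static boolean isFileClosed(FileSystem fs, Path path) {
        try {
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, path);
        } catch (NoSuchMethodException | IllegalAccessException e) {
          return false; // filesystem does not expose the probe
        } catch (InvocationTargetException e) {
          return false; // e.getCause() carries the underlying IOException
        }
      }
    }
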
2024-12-13T17:20:15,201 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-12-13T17:20:15,201 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:20:15,202 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740
2024-12-13T17:20:15,202 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740
2024-12-13T17:20:15,203 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-13T17:20:15,204 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-13T17:20:15,204 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:20:15,205 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-13T17:20:15,205 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-13T17:20:15,206 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-13T17:20:15,206 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:20:15,206 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-13T17:20:15,206 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-13T17:20:15,207 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-13T17:20:15,207 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:20:15,207 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-13T17:20:15,208 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/hbase/meta/1588230740
2024-12-13T17:20:15,209 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/hbase/meta/1588230740
2024-12-13T17:20:15,211 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
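
The FlushLargeStoresPolicy record above states its fallback rule directly: with no hbase.hregion.percolumnfamilyflush.size.lower.bound in the table descriptor, the per-family lower bound is the region's memstore flush heap size divided by the number of families. The 16.0 M result is consistent with the three meta families opened above (info, rep_barrier, table) and a 48 MB flush heap size; the 48 MB figure is inferred here, not logged directly:

    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        long memStoreFlushHeapSize = 48L * 1024 * 1024; // assumed, see note above
        int numFamilies = 3;                            // info, rep_barrier, table
        long lowerBound = memStoreFlushHeapSize / numFamilies;
        System.out.println(lowerBound / (1024 * 1024) + " MB"); // prints 16 MB
      }
    }
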
2024-12-13T17:20:15,212 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-13T17:20:15,213 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=745085, jitterRate=-0.05257660150527954}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-13T17:20:15,213 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-12-13T17:20:15,214 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1734110415177
2024-12-13T17:20:15,216 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740
2024-12-13T17:20:15,216 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740
2024-12-13T17:20:15,216 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=bcc0a2319104,36927,1734110414236
2024-12-13T17:20:15,217 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bcc0a2319104,36927,1734110414236, state=OPEN
2024-12-13T17:20:15,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-13T17:20:15,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36927-0x100206821760001, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-13T17:20:15,250 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-13T17:20:15,250 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-13T17:20:15,253 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2
2024-12-13T17:20:15,253 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=bcc0a2319104,36927,1734110414236 in 229 msec
2024-12-13T17:20:15,256 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1
2024-12-13T17:20:15,256 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 692 msec
2024-12-13T17:20:15,259 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 738 msec
2024-12-13T17:20:15,259 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1734110415259, completionTime=-1
2024-12-13T17:20:15,260 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running
2024-12-13T17:20:15,260 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster...
2024-12-13T17:20:15,261 DEBUG [hconnection-0x574d4e11-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-13T17:20:15,263 INFO [RS-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55736, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-13T17:20:15,264 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1
2024-12-13T17:20:15,264 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1734110475264
2024-12-13T17:20:15,264 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1734110535264
2024-12-13T17:20:15,264 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 4 msec
2024-12-13T17:20:15,290 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,38535,1734110414089-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-13T17:20:15,290 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,38535,1734110414089-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-13T17:20:15,290 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,38535,1734110414089-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-13T17:20:15,290 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-bcc0a2319104:38535, period=300000, unit=MILLISECONDS is enabled.
2024-12-13T17:20:15,290 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-12-13T17:20:15,291 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating...
2024-12-13T17:20:15,291 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-13T17:20:15,293 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace
2024-12-13T17:20:15,293 DEBUG [master/bcc0a2319104:0.Chore.1 {}] janitor.CatalogJanitor(179):
2024-12-13T17:20:15,295 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION
2024-12-13T17:20:15,295 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:20:15,297 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-13T17:20:15,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44343 is added to blk_1073741835_1011 (size=358)
2024-12-13T17:20:15,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741835_1011 (size=358)
2024-12-13T17:20:15,308 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 42c63207f2ed324ce48f0b276a67934b, NAME => 'hbase:namespace,,1734110415291.42c63207f2ed324ce48f0b276a67934b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1
2024-12-13T17:20:15,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44343 is added to blk_1073741836_1012 (size=42)
2024-12-13T17:20:15,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741836_1012 (size=42)
2024-12-13T17:20:15,314 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734110415291.42c63207f2ed324ce48f0b276a67934b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:20:15,314 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 42c63207f2ed324ce48f0b276a67934b, disabling compactions & flushes
2024-12-13T17:20:15,314 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734110415291.42c63207f2ed324ce48f0b276a67934b.
2024-12-13T17:20:15,314 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734110415291.42c63207f2ed324ce48f0b276a67934b.
2024-12-13T17:20:15,314 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734110415291.42c63207f2ed324ce48f0b276a67934b. after waiting 0 ms
2024-12-13T17:20:15,314 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734110415291.42c63207f2ed324ce48f0b276a67934b.
2024-12-13T17:20:15,314 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1734110415291.42c63207f2ed324ce48f0b276a67934b.
2024-12-13T17:20:15,314 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 42c63207f2ed324ce48f0b276a67934b:
2024-12-13T17:20:15,315 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META
2024-12-13T17:20:15,316 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1734110415291.42c63207f2ed324ce48f0b276a67934b.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1734110415315"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734110415315"}]},"ts":"1734110415315"}
2024-12-13T17:20:15,317 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta.
2024-12-13T17:20:15,318 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-13T17:20:15,319 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734110415319"}]},"ts":"1734110415319"}
2024-12-13T17:20:15,320 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta
2024-12-13T17:20:15,339 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=42c63207f2ed324ce48f0b276a67934b, ASSIGN}]
2024-12-13T17:20:15,341 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=42c63207f2ed324ce48f0b276a67934b, ASSIGN
2024-12-13T17:20:15,343 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=42c63207f2ed324ce48f0b276a67934b, ASSIGN; state=OFFLINE, location=bcc0a2319104,36927,1734110414236; forceNewPlan=false, retain=false
2024-12-13T17:20:15,494 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=42c63207f2ed324ce48f0b276a67934b, regionState=OPENING, regionLocation=bcc0a2319104,36927,1734110414236
2024-12-13T17:20:15,500 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 42c63207f2ed324ce48f0b276a67934b, server=bcc0a2319104,36927,1734110414236}]
2024-12-13T17:20:15,634 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:20:15,654 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bcc0a2319104,36927,1734110414236
2024-12-13T17:20:15,662 INFO [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1734110415291.42c63207f2ed324ce48f0b276a67934b.
2024-12-13T17:20:15,662 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 42c63207f2ed324ce48f0b276a67934b, NAME => 'hbase:namespace,,1734110415291.42c63207f2ed324ce48f0b276a67934b.', STARTKEY => '', ENDKEY => ''}
2024-12-13T17:20:15,663 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 42c63207f2ed324ce48f0b276a67934b
2024-12-13T17:20:15,663 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734110415291.42c63207f2ed324ce48f0b276a67934b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:20:15,663 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 42c63207f2ed324ce48f0b276a67934b
2024-12-13T17:20:15,663 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 42c63207f2ed324ce48f0b276a67934b
2024-12-13T17:20:15,666 INFO [StoreOpener-42c63207f2ed324ce48f0b276a67934b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 42c63207f2ed324ce48f0b276a67934b
2024-12-13T17:20:15,668 INFO [StoreOpener-42c63207f2ed324ce48f0b276a67934b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 42c63207f2ed324ce48f0b276a67934b columnFamilyName info
2024-12-13T17:20:15,668 DEBUG [StoreOpener-42c63207f2ed324ce48f0b276a67934b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:20:15,669 INFO [StoreOpener-42c63207f2ed324ce48f0b276a67934b-1 {}] regionserver.HStore(327): Store=42c63207f2ed324ce48f0b276a67934b/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-13T17:20:15,670 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/hbase/namespace/42c63207f2ed324ce48f0b276a67934b
2024-12-13T17:20:15,670 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/hbase/namespace/42c63207f2ed324ce48f0b276a67934b
2024-12-13T17:20:15,672 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 42c63207f2ed324ce48f0b276a67934b
2024-12-13T17:20:15,674 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/hbase/namespace/42c63207f2ed324ce48f0b276a67934b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-13T17:20:15,675 INFO [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 42c63207f2ed324ce48f0b276a67934b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=783874, jitterRate=-0.0032535791397094727}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-13T17:20:15,676 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 42c63207f2ed324ce48f0b276a67934b:
2024-12-13T17:20:15,677 INFO [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1734110415291.42c63207f2ed324ce48f0b276a67934b., pid=6, masterSystemTime=1734110415654
2024-12-13T17:20:15,678 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1734110415291.42c63207f2ed324ce48f0b276a67934b.
2024-12-13T17:20:15,679 INFO [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1734110415291.42c63207f2ed324ce48f0b276a67934b.
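
Two records above make the sequence-id handoff visible: WALSplitUtil writes a marker file under recovered.edits whose name encodes the highest sequence id already used (1.seqid, newMaxSeqId=1, maxSeqId=-1), and the region then opens at the next value ("next sequenceid=2"). A sketch of that relationship; the file-name parsing here is illustrative, not the WALSplitUtil code:

    public class SeqIdMarkerSketch {
      public static void main(String[] args) {
        // From the records above: ".../recovered.edits/1.seqid, newMaxSeqId=1"
        String marker = "1.seqid";
        long newMaxSeqId = Long.parseLong(marker.substring(0, marker.indexOf('.')));
        long nextSequenceId = newMaxSeqId + 1;
        System.out.println("next sequenceid=" + nextSequenceId); // matches the log: 2
      }
    }
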
2024-12-13T17:20:15,679 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=42c63207f2ed324ce48f0b276a67934b, regionState=OPEN, openSeqNum=2, regionLocation=bcc0a2319104,36927,1734110414236
2024-12-13T17:20:15,683 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5
2024-12-13T17:20:15,683 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 42c63207f2ed324ce48f0b276a67934b, server=bcc0a2319104,36927,1734110414236 in 182 msec
2024-12-13T17:20:15,685 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4
2024-12-13T17:20:15,685 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=42c63207f2ed324ce48f0b276a67934b, ASSIGN in 344 msec
2024-12-13T17:20:15,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:20:15,686 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-13T17:20:15,686 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734110415686"}]},"ts":"1734110415686"}
2024-12-13T17:20:15,688 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta
2024-12-13T17:20:15,698 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace
2024-12-13T17:20:15,699 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION
2024-12-13T17:20:15,701 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 408 msec
2024-12-13T17:20:15,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace
2024-12-13T17:20:15,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36927-0x100206821760001, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:20:15,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:20:15,710 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default
2024-12-13T17:20:15,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace
2024-12-13T17:20:15,800 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 87 msec
2024-12-13T17:20:15,804 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase
2024-12-13T17:20:15,815 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:20:15,823 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace
2024-12-13T17:20:15,837 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 31 msec
2024-12-13T17:20:15,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default
2024-12-13T17:20:15,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase
2024-12-13T17:20:15,881 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 1.600sec
2024-12-13T17:20:15,881 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-12-13T17:20:15,881 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-12-13T17:20:15,881 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-12-13T17:20:15,881 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-12-13T17:20:15,881 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-12-13T17:20:15,881 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,38535,1734110414089-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-13T17:20:15,881 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,38535,1734110414089-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-12-13T17:20:15,884 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds
2024-12-13T17:20:15,884 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-12-13T17:20:15,884 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,38535,1734110414089-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-13T17:20:15,962 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x45939a2d to 127.0.0.1:57192 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8c32659
2024-12-13T17:20:15,974 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c879432, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-13T17:20:15,977 DEBUG [hconnection-0x7de78615-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-13T17:20:15,979 INFO [RS-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55738, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-13T17:20:15,982 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=bcc0a2319104,38535,1734110414089
2024-12-13T17:20:15,982 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-13T17:20:15,986 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false
2024-12-13T17:20:15,987 INFO [Time-limited test {}] wal.TestLogRolling(297): Starting testLogRollOnPipelineRestart
2024-12-13T17:20:15,987 INFO [Time-limited test {}] wal.TestLogRolling(300): Replication=2
2024-12-13T17:20:15,988 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false
2024-12-13T17:20:15,990 INFO [RS-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57550, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-12-13T17:20:15,992 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38535 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions.
2024-12-13T17:20:15,992 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38535 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-13T17:20:15,992 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38535 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T17:20:15,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38535 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-12-13T17:20:15,995 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T17:20:15,995 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T17:20:15,996 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38535 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 9 2024-12-13T17:20:15,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38535 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-13T17:20:15,997 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T17:20:16,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40907 is added to blk_1073741837_1013 (size=395) 2024-12-13T17:20:16,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44343 is added to blk_1073741837_1013 (size=395) 2024-12-13T17:20:16,008 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => d2462ad1dd5771b17235db5ed67f620e, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1734110415992.d2462ad1dd5771b17235db5ed67f620e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1 2024-12-13T17:20:16,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
2024-12-13T17:20:16,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44343 is added to blk_1073741838_1014 (size=78)
2024-12-13T17:20:16,015 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1734110415992.d2462ad1dd5771b17235db5ed67f620e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:20:16,015 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1681): Closing d2462ad1dd5771b17235db5ed67f620e, disabling compactions & flushes
2024-12-13T17:20:16,015 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1734110415992.d2462ad1dd5771b17235db5ed67f620e.
2024-12-13T17:20:16,015 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1734110415992.d2462ad1dd5771b17235db5ed67f620e.
2024-12-13T17:20:16,015 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1734110415992.d2462ad1dd5771b17235db5ed67f620e. after waiting 0 ms
2024-12-13T17:20:16,015 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1734110415992.d2462ad1dd5771b17235db5ed67f620e.
2024-12-13T17:20:16,015 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRollOnPipelineRestart,,1734110415992.d2462ad1dd5771b17235db5ed67f620e.
2024-12-13T17:20:16,015 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1635): Region close journal for d2462ad1dd5771b17235db5ed67f620e:
2024-12-13T17:20:16,016 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META
2024-12-13T17:20:16,016 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1734110415992.d2462ad1dd5771b17235db5ed67f620e.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1734110416016"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734110416016"}]},"ts":"1734110416016"}
2024-12-13T17:20:16,018 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta.
2024-12-13T17:20:16,019 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-13T17:20:16,019 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734110416019"}]},"ts":"1734110416019"}
2024-12-13T17:20:16,020 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta
2024-12-13T17:20:16,040 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=d2462ad1dd5771b17235db5ed67f620e, ASSIGN}]
2024-12-13T17:20:16,042 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=d2462ad1dd5771b17235db5ed67f620e, ASSIGN
2024-12-13T17:20:16,043 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=d2462ad1dd5771b17235db5ed67f620e, ASSIGN; state=OFFLINE, location=bcc0a2319104,36927,1734110414236; forceNewPlan=false, retain=false
2024-12-13T17:20:16,194 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=d2462ad1dd5771b17235db5ed67f620e, regionState=OPENING, regionLocation=bcc0a2319104,36927,1734110414236
2024-12-13T17:20:16,199 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure d2462ad1dd5771b17235db5ed67f620e, server=bcc0a2319104,36927,1734110414236}]
2024-12-13T17:20:16,354 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bcc0a2319104,36927,1734110414236
2024-12-13T17:20:16,363 INFO [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRollOnPipelineRestart,,1734110415992.d2462ad1dd5771b17235db5ed67f620e.
2024-12-13T17:20:16,364 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => d2462ad1dd5771b17235db5ed67f620e, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1734110415992.d2462ad1dd5771b17235db5ed67f620e.', STARTKEY => '', ENDKEY => ''}
2024-12-13T17:20:16,364 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart d2462ad1dd5771b17235db5ed67f620e
2024-12-13T17:20:16,364 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1734110415992.d2462ad1dd5771b17235db5ed67f620e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:20:16,364 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for d2462ad1dd5771b17235db5ed67f620e
2024-12-13T17:20:16,364 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for d2462ad1dd5771b17235db5ed67f620e
2024-12-13T17:20:16,366 INFO [StoreOpener-d2462ad1dd5771b17235db5ed67f620e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region d2462ad1dd5771b17235db5ed67f620e
2024-12-13T17:20:16,368 INFO [StoreOpener-d2462ad1dd5771b17235db5ed67f620e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2462ad1dd5771b17235db5ed67f620e columnFamilyName info
2024-12-13T17:20:16,368 DEBUG [StoreOpener-d2462ad1dd5771b17235db5ed67f620e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:20:16,369 INFO [StoreOpener-d2462ad1dd5771b17235db5ed67f620e-1 {}] regionserver.HStore(327): Store=d2462ad1dd5771b17235db5ed67f620e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-13T17:20:16,370 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/default/TestLogRolling-testLogRollOnPipelineRestart/d2462ad1dd5771b17235db5ed67f620e
2024-12-13T17:20:16,370 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/default/TestLogRolling-testLogRollOnPipelineRestart/d2462ad1dd5771b17235db5ed67f620e
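[Editor's note] The CompactionConfiguration line above lists the selection knobs for this store (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000). A hedged illustration of the classic size-ratio rule those numbers feed follows: in HBase's size-based selection a store file generally stays in the candidate list while its size is at most ratio times the sum of the sizes of the files after it. This is a simplification of the ratio-based policy, not the exact ExploringCompactionPolicy code path named in the log; the file sizes below are hypothetical:

```java
import java.util.Arrays;

public class RatioCheckSketch {
  public static void main(String[] args) {
    long[] sizes = {100L << 20, 40L << 20, 30L << 20, 20L << 20}; // hypothetical sizes, largest first
    double ratio = 1.2; // "ratio 1.200000" from the log line above
    for (int i = 0; i < sizes.length; i++) {
      long sumOfSmaller = Arrays.stream(sizes, i + 1, sizes.length).sum();
      // file i is kept as a compaction candidate while it is not too large
      // relative to the files that would be compacted alongside it
      boolean eligible = sizes[i] <= ratio * sumOfSmaller;
      System.out.printf("file %d (%d MB): eligible=%b%n", i, sizes[i] >> 20, eligible);
    }
  }
}
```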
2024-12-13T17:20:16,373 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for d2462ad1dd5771b17235db5ed67f620e
2024-12-13T17:20:16,377 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/default/TestLogRolling-testLogRollOnPipelineRestart/d2462ad1dd5771b17235db5ed67f620e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-13T17:20:16,378 INFO [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened d2462ad1dd5771b17235db5ed67f620e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=747999, jitterRate=-0.04887133836746216}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-13T17:20:16,379 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for d2462ad1dd5771b17235db5ed67f620e:
2024-12-13T17:20:16,380 INFO [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1734110415992.d2462ad1dd5771b17235db5ed67f620e., pid=11, masterSystemTime=1734110416353
2024-12-13T17:20:16,383 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1734110415992.d2462ad1dd5771b17235db5ed67f620e.
2024-12-13T17:20:16,383 INFO [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRollOnPipelineRestart,,1734110415992.d2462ad1dd5771b17235db5ed67f620e.
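[Editor's note] The split-policy numbers in the "Opened d2462..." line above are internally consistent: applying the logged jitterRate to the configured "hbase.hregion.max.filesize" of 786432 (the value in the MAX_FILESIZE warning earlier) reproduces the logged desiredMaxFileSize of 747999. The exact formula below is reconstructed from these numbers, not copied from HBase source:

```java
public class SplitJitterCheck {
  public static void main(String[] args) {
    long maxFileSize = 786432L;                     // hbase.hregion.max.filesize from the log
    double jitterRate = -0.04887133836746216;        // jitterRate from the log
    // 786432 * -0.04887... = -38433.98; the long cast truncates toward zero
    long desired = maxFileSize + (long) (maxFileSize * jitterRate);
    System.out.println(desired);                     // prints 747999, matching desiredMaxFileSize
  }
}
```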
2024-12-13T17:20:16,384 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=d2462ad1dd5771b17235db5ed67f620e, regionState=OPEN, openSeqNum=2, regionLocation=bcc0a2319104,36927,1734110414236
2024-12-13T17:20:16,391 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10
2024-12-13T17:20:16,391 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure d2462ad1dd5771b17235db5ed67f620e, server=bcc0a2319104,36927,1734110414236 in 188 msec
2024-12-13T17:20:16,395 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9
2024-12-13T17:20:16,395 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=d2462ad1dd5771b17235db5ed67f620e, ASSIGN in 352 msec
2024-12-13T17:20:16,397 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-13T17:20:16,397 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734110416397"}]},"ts":"1734110416397"}
2024-12-13T17:20:16,399 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta
2024-12-13T17:20:16,445 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION
2024-12-13T17:20:16,450 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 454 msec
2024-12-13T17:20:16,635 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:16,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null, caused by java.io.IOException: Filesystem closed [stack trace identical to the 17:20:16,635 entry above; elided]
2024-12-13T17:20:16,817 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null, caused by java.io.IOException: Filesystem closed [stack trace identical to the 17:20:16,635 entry above; elided]
2024-12-13T17:20:17,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null, caused by java.io.IOException: Filesystem closed [stack trace identical to the 17:20:16,635 entry above; elided]
2024-12-13T17:20:17,687 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null, caused by java.io.IOException: Filesystem closed [stack trace identical to the 17:20:16,635 entry above; elided]
2024-12-13T17:20:17,818 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null, caused by java.io.IOException: Filesystem closed [stack trace identical to the 17:20:16,635 entry above; elided]
2024-12-13T17:20:18,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null, caused by java.io.IOException: Filesystem closed [stack trace identical to the 17:20:16,635 entry above; elided]
2024-12-13T17:20:18,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null, caused by java.io.IOException: Filesystem closed [stack trace identical to the 17:20:16,635 entry above; elided]
2024-12-13T17:20:18,820 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null, caused by java.io.IOException: Filesystem closed [stack trace identical to the 17:20:16,635 entry above; elided]
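[Editor's note] The InvocationTargetException wrapping in the traces above comes from the frames Method.invoke -> RecoverLeaseFSUtils.isFileClosed -> DistributedFileSystem.isFileClosed: the isFileClosed probe is made reflectively (the method does not exist on the FileSystem base class), so the "Filesystem closed" IOException thrown by DFSClient.checkOpen surfaces as the cause of a reflection exception. A minimal sketch of that call pattern, under the assumption that this is what RecoverLeaseFSUtils does at the logged line; class and method names here are illustrative except for the real isFileClosed(Path) API:

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReflectiveIsFileClosedSketch {
  static boolean isFileClosed(FileSystem fs, Path p) {
    try {
      // DistributedFileSystem has isFileClosed(Path); FileSystem does not,
      // hence the reflective lookup seen in the stack traces above.
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, p);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return false; // filesystem without the probe; treat as not closed
    } catch (InvocationTargetException e) {
      // An IOException thrown by the target (e.g. "Filesystem closed" once
      // the mini-cluster's DFSClient is shut down) lands here as the cause,
      // which is exactly what the WARN entries record.
      return false;
    }
  }
}
```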
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:19,523 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-13T17:20:19,540 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:20:19,541 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:20:19,541 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:20:19,541 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:20:19,541 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:20:19,542 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:20:19,544 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:20:19,546 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:20:19,640 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:19,690 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:20:19,821 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:20,603 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-13T17:20:20,605 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-12-13T17:20:20,641 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:20,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:20:20,822 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:21,641 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:21,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:21,823 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:22,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:22,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:22,824 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:23,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:23,693 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:23,824 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:24,019 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace
2024-12-13T17:20:24,019 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer
2024-12-13T17:20:24,020 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart
2024-12-13T17:20:24,020 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer
2024-12-13T17:20:24,020 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-12-13T17:20:24,020 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-12-13T17:20:24,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:24,693 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:24,825 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:25,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:25,694 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:25,825 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:25,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38535 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9
2024-12-13T17:20:25,998 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart, procId: 9 completed
2024-12-13T17:20:26,001 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart
2024-12-13T17:20:26,001 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1734110415992.d2462ad1dd5771b17235db5ed67f620e.
2024-12-13T17:20:26,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:26,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:26,826 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:27,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:27,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:27,826 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:28,006 INFO [Time-limited test {}] wal.TestLogRolling(337): log.getCurrentFileName()): hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110414743
2024-12-13T17:20:28,006 WARN [ResponseProcessor for block BP-602853586-172.17.0.2-1734110412509:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-602853586-172.17.0.2-1734110412509:blk_1073741834_1010
java.io.EOFException: Unexpected EOF while trying to read response from server
	at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:20:28,006 WARN [ResponseProcessor for block BP-602853586-172.17.0.2-1734110412509:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-602853586-172.17.0.2-1734110412509:blk_1073741830_1006
java.io.IOException: Bad response ERROR for BP-602853586-172.17.0.2-1734110412509:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:44343,DS-00efdfd4-455c-42c6-87d0-80262b2540ef,DISK]
	at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:20:28,006 WARN [ResponseProcessor for block BP-602853586-172.17.0.2-1734110412509:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-602853586-172.17.0.2-1734110412509:blk_1073741833_1009
java.io.IOException: Bad response ERROR for BP-602853586-172.17.0.2-1734110412509:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:44343,DS-00efdfd4-455c-42c6-87d0-80262b2540ef,DISK]
	at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:20:28,007 WARN [DataStreamer for file /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.meta.1734110415193.meta block BP-602853586-172.17.0.2-1734110412509:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-602853586-172.17.0.2-1734110412509:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44343,DS-00efdfd4-455c-42c6-87d0-80262b2540ef,DISK], DatanodeInfoWithStorage[127.0.0.1:40907,DS-cbbe8f95-9bca-4b53-94b2-db13fa46e4ac,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44343,DS-00efdfd4-455c-42c6-87d0-80262b2540ef,DISK]) is bad.
2024-12-13T17:20:28,007 WARN [DataStreamer for file /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110414743 block BP-602853586-172.17.0.2-1734110412509:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-602853586-172.17.0.2-1734110412509:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40907,DS-cbbe8f95-9bca-4b53-94b2-db13fa46e4ac,DISK], DatanodeInfoWithStorage[127.0.0.1:44343,DS-00efdfd4-455c-42c6-87d0-80262b2540ef,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44343,DS-00efdfd4-455c-42c6-87d0-80262b2540ef,DISK]) is bad.
2024-12-13T17:20:28,007 WARN [DataStreamer for file /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/WALs/bcc0a2319104,38535,1734110414089/bcc0a2319104%2C38535%2C1734110414089.1734110414344 block BP-602853586-172.17.0.2-1734110412509:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-602853586-172.17.0.2-1734110412509:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40907,DS-cbbe8f95-9bca-4b53-94b2-db13fa46e4ac,DISK], DatanodeInfoWithStorage[127.0.0.1:44343,DS-00efdfd4-455c-42c6-87d0-80262b2540ef,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44343,DS-00efdfd4-455c-42c6-87d0-80262b2540ef,DISK]) is bad.
2024-12-13T17:20:28,007 WARN [PacketResponder: BP-602853586-172.17.0.2-1734110412509:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:44343] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run():
java.io.IOException: Connection reset by peer
	at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?]
	at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?]
	at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?]
	at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?]
	at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?]
	at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?]
	at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?]
	at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T17:20:28,007 WARN [PacketResponder: BP-602853586-172.17.0.2-1734110412509:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:44343] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run():
java.io.IOException: Connection reset by peer
	at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?]
	at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?]
	at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?]
	at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?]
	at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?]
	at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?]
	at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?]
	at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T17:20:28,007 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1737908575_22 at /127.0.0.1:40192 [Receiving block BP-602853586-172.17.0.2-1734110412509:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:40907:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40192 dst: /127.0.0.1:40907
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T17:20:28,007 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1296647611_22 at /127.0.0.1:40218 [Receiving block BP-602853586-172.17.0.2-1734110412509:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:40907:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40218 dst: /127.0.0.1:40907
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T17:20:28,007 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1296647611_22 at /127.0.0.1:37698 [Receiving block BP-602853586-172.17.0.2-1734110412509:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44343:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37698 dst: /127.0.0.1:44343
java.nio.channels.ClosedChannelException: null
	at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
	at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
	at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
	at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T17:20:28,007 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1737908575_22 at /127.0.0.1:37664 [Receiving block BP-602853586-172.17.0.2-1734110412509:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44343:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37664 dst: /127.0.0.1:44343
java.nio.channels.ClosedChannelException: null
	at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
	at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
	at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
	at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T17:20:28,007 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1296647611_22 at /127.0.0.1:37686 [Receiving block BP-602853586-172.17.0.2-1734110412509:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44343:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37686 dst: /127.0.0.1:44343
java.nio.channels.ClosedChannelException: null
	at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
	at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
	at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
	at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T17:20:28,008 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1296647611_22 at /127.0.0.1:40222 [Receiving block BP-602853586-172.17.0.2-1734110412509:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:40907:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40222 dst: /127.0.0.1:40907
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T17:20:28,047 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@cc0feb2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-13T17:20:28,047 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a4cb381{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-13T17:20:28,047 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-13T17:20:28,047 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@365421ee{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-13T17:20:28,047 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@55558130{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/hadoop.log.dir/,STOPPED}
2024-12-13T17:20:28,051 WARN [BP-602853586-172.17.0.2-1734110412509 heartbeating to localhost/127.0.0.1:42927 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-13T17:20:28,051 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-13T17:20:28,051 WARN [BP-602853586-172.17.0.2-1734110412509 heartbeating to localhost/127.0.0.1:42927 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-602853586-172.17.0.2-1734110412509 (Datanode Uuid cd7ef08b-e07d-4d0d-8298-8bc1e8733972) service to localhost/127.0.0.1:42927
2024-12-13T17:20:28,051 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-13T17:20:28,052 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/cluster_1bbe54eb-58a5-af1a-0866-29993c667105/dfs/data/data3/current/BP-602853586-172.17.0.2-1734110412509 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T17:20:28,052 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/cluster_1bbe54eb-58a5-af1a-0866-29993c667105/dfs/data/data4/current/BP-602853586-172.17.0.2-1734110412509 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T17:20:28,052 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-13T17:20:28,060 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-13T17:20:28,063 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-13T17:20:28,063 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-13T17:20:28,063 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-13T17:20:28,063 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-13T17:20:28,064 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a634641{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/hadoop.log.dir/,AVAILABLE}
2024-12-13T17:20:28,064 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@67bc025d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-13T17:20:28,153 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@644262fb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/java.io.tmpdir/jetty-localhost-36731-hadoop-hdfs-3_4_1-tests_jar-_-any-8975113192965457982/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-13T17:20:28,154 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@35762114{HTTP/1.1, (http/1.1)}{localhost:36731}
2024-12-13T17:20:28,154 INFO [Time-limited test {}] server.Server(415): Started @200881ms
2024-12-13T17:20:28,155 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-13T17:20:28,168 WARN [ResponseProcessor for block BP-602853586-172.17.0.2-1734110412509:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-602853586-172.17.0.2-1734110412509:blk_1073741830_1015
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:20:28,168 WARN [ResponseProcessor for block BP-602853586-172.17.0.2-1734110412509:blk_1073741834_1017 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-602853586-172.17.0.2-1734110412509:blk_1073741834_1017
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:20:28,169 WARN [ResponseProcessor for block BP-602853586-172.17.0.2-1734110412509:blk_1073741833_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-602853586-172.17.0.2-1734110412509:blk_1073741833_1016
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:20:28,169 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1296647611_22 at /127.0.0.1:56390 [Receiving block BP-602853586-172.17.0.2-1734110412509:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:40907:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56390 dst: /127.0.0.1:40907
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T17:20:28,169 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1737908575_22 at /127.0.0.1:56382 [Receiving block BP-602853586-172.17.0.2-1734110412509:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:40907:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56382 dst: /127.0.0.1:40907
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T17:20:28,169 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1296647611_22 at /127.0.0.1:56402 [Receiving block BP-602853586-172.17.0.2-1734110412509:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:40907:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56402 dst: /127.0.0.1:40907
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T17:20:28,171 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3684beb3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-13T17:20:28,171 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6cdd29e5{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-13T17:20:28,171 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-13T17:20:28,171 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@76f2351a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-13T17:20:28,171 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@36b4a933{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/hadoop.log.dir/,STOPPED}
2024-12-13T17:20:28,172 WARN [BP-602853586-172.17.0.2-1734110412509 heartbeating to localhost/127.0.0.1:42927 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-13T17:20:28,172 WARN [BP-602853586-172.17.0.2-1734110412509 heartbeating to localhost/127.0.0.1:42927 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-602853586-172.17.0.2-1734110412509 (Datanode Uuid ebc35a20-901e-4bc3-a0b9-a60a36b7f088) service to localhost/127.0.0.1:42927
2024-12-13T17:20:28,172 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-13T17:20:28,172 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-13T17:20:28,173 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/cluster_1bbe54eb-58a5-af1a-0866-29993c667105/dfs/data/data1/current/BP-602853586-172.17.0.2-1734110412509 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T17:20:28,173 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/cluster_1bbe54eb-58a5-af1a-0866-29993c667105/dfs/data/data2/current/BP-602853586-172.17.0.2-1734110412509 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T17:20:28,173 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-13T17:20:28,183 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-13T17:20:28,186 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-13T17:20:28,187 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-13T17:20:28,187 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-13T17:20:28,187 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-13T17:20:28,187 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6fda4d2f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/hadoop.log.dir/,AVAILABLE}
2024-12-13T17:20:28,187 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@58f03a0b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-13T17:20:28,277 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@65245b4a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/java.io.tmpdir/jetty-localhost-32965-hadoop-hdfs-3_4_1-tests_jar-_-any-9406729576354023523/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-13T17:20:28,277 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@179009bb{HTTP/1.1, (http/1.1)}{localhost:32965}
2024-12-13T17:20:28,278 INFO [Time-limited test {}] server.Server(415): Started @201005ms
2024-12-13T17:20:28,279 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-13T17:20:28,523 WARN [Thread-1106 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-13T17:20:28,525 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd7284b4e1fc93aaa with lease ID 0x3252334d4d477a34: from storage DS-00efdfd4-455c-42c6-87d0-80262b2540ef node DatanodeRegistration(127.0.0.1:39843, datanodeUuid=cd7ef08b-e07d-4d0d-8298-8bc1e8733972, infoPort=36725, infoSecurePort=0, ipcPort=39283, storageInfo=lv=-57;cid=testClusterID;nsid=2046495365;c=1734110412509), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-13T17:20:28,525 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd7284b4e1fc93aaa with lease ID 0x3252334d4d477a34: from storage DS-e57a1a5d-c246-461a-bbaa-fbab323a2ffc node DatanodeRegistration(127.0.0.1:39843, datanodeUuid=cd7ef08b-e07d-4d0d-8298-8bc1e8733972, infoPort=36725, infoSecurePort=0, ipcPort=39283, storageInfo=lv=-57;cid=testClusterID;nsid=2046495365;c=1734110412509), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-13T17:20:28,641 WARN [Thread-1126 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-13T17:20:28,643 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc356274de085a9ec with lease ID 0x3252334d4d477a35: from storage DS-cbbe8f95-9bca-4b53-94b2-db13fa46e4ac node DatanodeRegistration(127.0.0.1:45597, datanodeUuid=ebc35a20-901e-4bc3-a0b9-a60a36b7f088, infoPort=44257, infoSecurePort=0, ipcPort=37721, storageInfo=lv=-57;cid=testClusterID;nsid=2046495365;c=1734110412509), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-13T17:20:28,643 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc356274de085a9ec with lease ID 0x3252334d4d477a35: from storage DS-42aa5d66-ccc0-4dd3-a06e-69160aa2078f node DatanodeRegistration(127.0.0.1:45597, datanodeUuid=ebc35a20-901e-4bc3-a0b9-a60a36b7f088, infoPort=44257, infoSecurePort=0, ipcPort=37721, storageInfo=lv=-57;cid=testClusterID;nsid=2046495365;c=1734110412509), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-13T17:20:28,646 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:20:28,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:20:28,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 after 68082ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T17:20:28,827 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:20:29,296 INFO [Time-limited test {}] wal.TestLogRolling(349): Data Nodes restarted
2024-12-13T17:20:29,298 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002
2024-12-13T17:20:29,299 WARN [RS:0;bcc0a2319104:36927.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=5, requesting roll of WAL
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40907,DS-cbbe8f95-9bca-4b53-94b2-db13fa46e4ac,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:20:29,299 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog bcc0a2319104%2C36927%2C1734110414236:(num 1734110414743) roll requested
2024-12-13T17:20:29,299 INFO [regionserver/bcc0a2319104:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C36927%2C1734110414236.1734110429299
2024-12-13T17:20:29,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36927 {}] ipc.MetricsHBaseServer(152): Unknown exception type
org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=5, requesting roll of WAL
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?]
    at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?]
    at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40907,DS-cbbe8f95-9bca-4b53-94b2-db13fa46e4ac,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:20:29,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36927 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:55738 deadline: 1734110439298, exception=org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=5, requesting roll of WAL
2024-12-13T17:20:29,304 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.TestLogRolling$2(324): preLogRoll: oldFile=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110414743 newFile=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110429299
2024-12-13T17:20:29,304 WARN [regionserver/bcc0a2319104:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WAL
org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=5, requesting roll of WAL
2024-12-13T17:20:29,304 INFO [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110414743 with entries=5, filesize=2.09 KB; new WAL /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110429299
2024-12-13T17:20:29,305 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36725:36725),(127.0.0.1/127.0.0.1:44257:44257)]
2024-12-13T17:20:29,305 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110414743 is not closed yet, will try archiving it next time
2024-12-13T17:20:29,305 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40907,DS-cbbe8f95-9bca-4b53-94b2-db13fa46e4ac,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:20:29,305 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40907,DS-cbbe8f95-9bca-4b53-94b2-db13fa46e4ac,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:20:29,305 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110414743
2024-12-13T17:20:29,305 WARN [IPC Server handler 4 on default port 42927 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110414743 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1016
2024-12-13T17:20:29,305 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110414743 after 0ms
2024-12-13T17:20:29,646 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:20:29,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45597 is added to blk_1073741833_1019 (size=2136)
2024-12-13T17:20:29,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:20:29,828 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:20:30,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:20:30,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:20:30,828 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:20:31,526 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command
java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1016: GenerationStamp not matched, existing replica is blk_1073741833_1009
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?]
2024-12-13T17:20:31,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:20:31,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:20:31,829 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:20:32,648 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:32,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:32,829 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:33,306 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110414743 after 4001ms 2024-12-13T17:20:33,648 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:33,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:33,830 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:34,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:34,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:34,830 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:35,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:20:35,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:35,831 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:36,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:36,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:36,832 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:37,651 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:37,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:37,833 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:20:38,653 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:38,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:38,834 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:20:39,597 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 4ae54d53dadb6f7d5f47a16f2daa01e3, had cached 0 bytes from a total of 23930 2024-12-13T17:20:39,655 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:39,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:39,835 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
	11 more
2024-12-13T17:20:40,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:40,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:40,836 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:41,413 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003
2024-12-13T17:20:41,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:41,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:41,837 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:42,659 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:42,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:42,838 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:43,420 WARN [ResponseProcessor for block BP-602853586-172.17.0.2-1734110412509:blk_1073741839_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-602853586-172.17.0.2-1734110412509:blk_1073741839_1018
java.io.IOException: Bad response ERROR for BP-602853586-172.17.0.2-1734110412509:blk_1073741839_1018 from datanode DatanodeInfoWithStorage[127.0.0.1:45597,DS-cbbe8f95-9bca-4b53-94b2-db13fa46e4ac,DISK]
	at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:20:43,420 WARN [DataStreamer for file /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110429299 block BP-602853586-172.17.0.2-1734110412509:blk_1073741839_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-602853586-172.17.0.2-1734110412509:blk_1073741839_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39843,DS-00efdfd4-455c-42c6-87d0-80262b2540ef,DISK], DatanodeInfoWithStorage[127.0.0.1:45597,DS-cbbe8f95-9bca-4b53-94b2-db13fa46e4ac,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45597,DS-cbbe8f95-9bca-4b53-94b2-db13fa46e4ac,DISK]) is bad.
2024-12-13T17:20:43,421 WARN [PacketResponder: BP-602853586-172.17.0.2-1734110412509:blk_1073741839_1018, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:45597] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run():
java.io.IOException: Connection reset by peer
	at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?]
	at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?]
	at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?]
	at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?]
	at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?]
	at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?]
	at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?]
	at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T17:20:43,421 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1296647611_22 at /127.0.0.1:40682 [Receiving block BP-602853586-172.17.0.2-1734110412509:blk_1073741839_1018] {}] datanode.DataXceiver(331): 127.0.0.1:39843:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40682 dst: /127.0.0.1:39843
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T17:20:43,422 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1296647611_22 at /127.0.0.1:43048 [Receiving block BP-602853586-172.17.0.2-1734110412509:blk_1073741839_1018] {}] datanode.DataXceiver(331): 127.0.0.1:45597:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43048 dst: /127.0.0.1:45597
java.nio.channels.ClosedChannelException: null
	at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
	at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
	at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
	at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T17:20:43,444 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@65245b4a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-13T17:20:43,445 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@179009bb{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-13T17:20:43,445 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-13T17:20:43,445 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@58f03a0b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-13T17:20:43,445 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6fda4d2f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/hadoop.log.dir/,STOPPED}
2024-12-13T17:20:43,448 WARN [BP-602853586-172.17.0.2-1734110412509 heartbeating to localhost/127.0.0.1:42927 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-13T17:20:43,448 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-13T17:20:43,448 WARN [BP-602853586-172.17.0.2-1734110412509 heartbeating to localhost/127.0.0.1:42927 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-602853586-172.17.0.2-1734110412509 (Datanode Uuid ebc35a20-901e-4bc3-a0b9-a60a36b7f088) service to localhost/127.0.0.1:42927
2024-12-13T17:20:43,448 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-13T17:20:43,449 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/cluster_1bbe54eb-58a5-af1a-0866-29993c667105/dfs/data/data1/current/BP-602853586-172.17.0.2-1734110412509 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T17:20:43,450 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/cluster_1bbe54eb-58a5-af1a-0866-29993c667105/dfs/data/data2/current/BP-602853586-172.17.0.2-1734110412509 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T17:20:43,450 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-13T17:20:43,461 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-13T17:20:43,464 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-13T17:20:43,465 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-13T17:20:43,465 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-13T17:20:43,465 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-13T17:20:43,466 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30a29a4e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/hadoop.log.dir/,AVAILABLE}
2024-12-13T17:20:43,466 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7d0cde33{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-13T17:20:43,558 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@78073245{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/java.io.tmpdir/jetty-localhost-39181-hadoop-hdfs-3_4_1-tests_jar-_-any-14223909746793048556/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-13T17:20:43,558 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@84fd095{HTTP/1.1, (http/1.1)}{localhost:39181}
2024-12-13T17:20:43,558 INFO [Time-limited test {}] server.Server(415): Started @216286ms
2024-12-13T17:20:43,559 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-13T17:20:43,574 WARN [ResponseProcessor for block BP-602853586-172.17.0.2-1734110412509:blk_1073741839_1020 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-602853586-172.17.0.2-1734110412509:blk_1073741839_1020
java.io.EOFException: Unexpected EOF while trying to read response from server
	at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:20:43,574 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1296647611_22 at /127.0.0.1:38590 [Receiving block BP-602853586-172.17.0.2-1734110412509:blk_1073741839_1018] {}] datanode.DataXceiver(331): 127.0.0.1:39843:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38590 dst: /127.0.0.1:39843
java.nio.channels.ClosedChannelException: null
	at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
	at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
	at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
	at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T17:20:43,576 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@644262fb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-13T17:20:43,576 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@35762114{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-13T17:20:43,576 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-13T17:20:43,577 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@67bc025d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-13T17:20:43,577 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a634641{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/hadoop.log.dir/,STOPPED}
2024-12-13T17:20:43,578 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-13T17:20:43,578 WARN [BP-602853586-172.17.0.2-1734110412509 heartbeating to localhost/127.0.0.1:42927 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-13T17:20:43,578 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-13T17:20:43,578 WARN [BP-602853586-172.17.0.2-1734110412509 heartbeating to localhost/127.0.0.1:42927 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-602853586-172.17.0.2-1734110412509 (Datanode Uuid cd7ef08b-e07d-4d0d-8298-8bc1e8733972) service to localhost/127.0.0.1:42927
2024-12-13T17:20:43,578 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/cluster_1bbe54eb-58a5-af1a-0866-29993c667105/dfs/data/data3/current/BP-602853586-172.17.0.2-1734110412509 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T17:20:43,579 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/cluster_1bbe54eb-58a5-af1a-0866-29993c667105/dfs/data/data4/current/BP-602853586-172.17.0.2-1734110412509 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T17:20:43,579 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-13T17:20:43,587 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-13T17:20:43,591 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-13T17:20:43,592 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-13T17:20:43,592 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-13T17:20:43,592 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-13T17:20:43,592 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@584aabf9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/hadoop.log.dir/,AVAILABLE}
2024-12-13T17:20:43,593 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d0ae2f8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-13T17:20:43,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:43,683 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@789f09e2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/java.io.tmpdir/jetty-localhost-43289-hadoop-hdfs-3_4_1-tests_jar-_-any-5448706272787239305/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-13T17:20:43,683 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@766df515{HTTP/1.1, (http/1.1)}{localhost:43289}
2024-12-13T17:20:43,683 INFO [Time-limited test {}] server.Server(415): Started @216411ms
2024-12-13T17:20:43,684 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-13T17:20:43,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:43,838 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:43,943 WARN [Thread-1181 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-13T17:20:43,946 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x99f365cc216942ee with lease ID 0x3252334d4d477a36: from storage DS-cbbe8f95-9bca-4b53-94b2-db13fa46e4ac node DatanodeRegistration(127.0.0.1:40291, datanodeUuid=ebc35a20-901e-4bc3-a0b9-a60a36b7f088, infoPort=42071, infoSecurePort=0, ipcPort=44121, storageInfo=lv=-57;cid=testClusterID;nsid=2046495365;c=1734110412509), blocks: 8, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-13T17:20:43,946 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x99f365cc216942ee with lease ID 0x3252334d4d477a36: from storage DS-42aa5d66-ccc0-4dd3-a06e-69160aa2078f node DatanodeRegistration(127.0.0.1:40291, datanodeUuid=ebc35a20-901e-4bc3-a0b9-a60a36b7f088, infoPort=42071, infoSecurePort=0, ipcPort=44121, storageInfo=lv=-57;cid=testClusterID;nsid=2046495365;c=1734110412509), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-13T17:20:44,027 WARN [Thread-1201 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-13T17:20:44,029 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x383c3283ff1a1e06 with lease ID 0x3252334d4d477a37: from storage DS-00efdfd4-455c-42c6-87d0-80262b2540ef node DatanodeRegistration(127.0.0.1:39523, datanodeUuid=cd7ef08b-e07d-4d0d-8298-8bc1e8733972, infoPort=36049, infoSecurePort=0, ipcPort=44075, storageInfo=lv=-57;cid=testClusterID;nsid=2046495365;c=1734110412509), blocks: 8, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-13T17:20:44,029 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x383c3283ff1a1e06 with lease ID 0x3252334d4d477a37: from storage DS-e57a1a5d-c246-461a-bbaa-fbab323a2ffc node DatanodeRegistration(127.0.0.1:39523, datanodeUuid=cd7ef08b-e07d-4d0d-8298-8bc1e8733972, infoPort=36049, infoSecurePort=0, ipcPort=44075, storageInfo=lv=-57;cid=testClusterID;nsid=2046495365;c=1734110412509), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-13T17:20:44,067 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-13T17:20:44,526 WARN [master/bcc0a2319104:0:becomeActiveMaster.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=95, requesting roll of WAL
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40907,DS-cbbe8f95-9bca-4b53-94b2-db13fa46e4ac,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:20:44,527 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(197): WAL FSHLog bcc0a2319104%2C38535%2C1734110414089:(num 1734110414344) roll requested
2024-12-13T17:20:44,527 ERROR [ProcExecTimeout {}] region.RegionProcedureStore(422): Failed to delete pids=[4, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?]
	at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?]
	at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40907,DS-cbbe8f95-9bca-4b53-94b2-db13fa46e4ac,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:20:44,528 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C38535%2C1734110414089.1734110444527
2024-12-13T17:20:44,528 ERROR [ProcExecTimeout {}] procedure2.TimeoutExecutorThread(124): Ignoring pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner exception: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL
java.io.UncheckedIOException: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL
	at org.apache.hadoop.hbase.procedure2.store.region.RegionProcedureStore.delete(RegionProcedureStore.java:423) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner.periodicExecute(CompletedProcedureCleaner.java:135) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.executeInMemoryChore(TimeoutExecutorThread.java:122) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.execDelayedProcedure(TimeoutExecutorThread.java:101) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.run(TimeoutExecutorThread.java:68) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
Caused by: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?]
	at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?]
	at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40907,DS-cbbe8f95-9bca-4b53-94b2-db13fa46e4ac,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:20:44,541 WARN [master:store-WAL-Roller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL
2024-12-13T17:20:44,541 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/WALs/bcc0a2319104,38535,1734110414089/bcc0a2319104%2C38535%2C1734110414089.1734110414344 with entries=92, filesize=45.99 KB; new WAL /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/WALs/bcc0a2319104,38535,1734110414089/bcc0a2319104%2C38535%2C1734110414089.1734110444527
2024-12-13T17:20:44,541 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36049:36049),(127.0.0.1/127.0.0.1:42071:42071)]
2024-12-13T17:20:44,542 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(751): hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/WALs/bcc0a2319104,38535,1734110414089/bcc0a2319104%2C38535%2C1734110414089.1734110414344 is not closed yet, will try archiving it next time
2024-12-13T17:20:44,542 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40907,DS-cbbe8f95-9bca-4b53-94b2-db13fa46e4ac,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:20:44,542 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40907,DS-cbbe8f95-9bca-4b53-94b2-db13fa46e4ac,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:20:44,542 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/WALs/bcc0a2319104,38535,1734110414089/bcc0a2319104%2C38535%2C1734110414089.1734110414344
2024-12-13T17:20:44,543 WARN [IPC Server handler 3 on default port 42927 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/WALs/bcc0a2319104,38535,1734110414089/bcc0a2319104%2C38535%2C1734110414089.1734110414344 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741830_1015
2024-12-13T17:20:44,543 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/WALs/bcc0a2319104,38535,1734110414089/bcc0a2319104%2C38535%2C1734110414089.1734110414344 after 1ms
2024-12-13T17:20:44,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:44,704 INFO [Time-limited test {}] wal.TestLogRolling(366): Data Nodes restarted
2024-12-13T17:20:44,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:44,708 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004
2024-12-13T17:20:44,711 WARN [RS:0;bcc0a2319104:36927.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=8, requesting roll of WAL
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39843,DS-00efdfd4-455c-42c6-87d0-80262b2540ef,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:20:44,711 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog bcc0a2319104%2C36927%2C1734110414236:(num 1734110429299) roll requested
2024-12-13T17:20:44,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36927 {}] ipc.MetricsHBaseServer(152): Unknown exception type
org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=8, requesting roll of WAL
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?]
	at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?]
	at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39843,DS-00efdfd4-455c-42c6-87d0-80262b2540ef,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:20:44,712 INFO [regionserver/bcc0a2319104:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C36927%2C1734110414236.1734110444711
2024-12-13T17:20:44,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36927 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:55738 deadline: 1734110454710, exception=org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=8, requesting roll of WAL
2024-12-13T17:20:44,719 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.TestLogRolling$2(324): preLogRoll: oldFile=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110429299 newFile=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110444711
2024-12-13T17:20:44,720 WARN [regionserver/bcc0a2319104:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WAL
org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=8, requesting roll of WAL
2024-12-13T17:20:44,720 INFO [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110429299 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110444711
2024-12-13T17:20:44,720 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36049:36049),(127.0.0.1/127.0.0.1:42071:42071)]
2024-12-13T17:20:44,720 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110429299 is not closed yet, will try archiving it next time
2024-12-13T17:20:44,720 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39843,DS-00efdfd4-455c-42c6-87d0-80262b2540ef,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:20:44,720 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39843,DS-00efdfd4-455c-42c6-87d0-80262b2540ef,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:20:44,720 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110429299
2024-12-13T17:20:44,721 WARN [IPC Server handler 3 on default port 42927 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110429299 has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741839_1020
2024-12-13T17:20:44,721 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110429299 after 1ms
2024-12-13T17:20:44,839 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:45,587 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-13T17:20:45,590 INFO [RS-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53014, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService
2024-12-13T17:20:45,661 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:45,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:45,841 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:45,842 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 after 68069ms
java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T17:20:46,663 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:46,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:46,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:46,946 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command
java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741839_1020: GenerationStamp not matched, existing replica is blk_1073741839_1018
	at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?]
2024-12-13T17:20:47,664 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:47,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:47,844 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:48,545 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/WALs/bcc0a2319104,38535,1734110414089/bcc0a2319104%2C38535%2C1734110414089.1734110414344 after 4003ms
2024-12-13T17:20:48,666 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:48,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:48,723 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110429299 after 4003ms
2024-12-13T17:20:48,845 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:49,667 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:49,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:49,846 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:50,032 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command
java.io.IOException: Failed to delete 1 (out of 2) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006
	at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?]
2024-12-13T17:20:50,668 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:50,714 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:50,847 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:51,670 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:51,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:51,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:52,671 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:20:52,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:52,850 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:53,672 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:53,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:53,850 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:20:54,674 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:54,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:54,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:55,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:55,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:55,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:56,545 INFO [master/bcc0a2319104:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-13T17:20:56,545 INFO [master/bcc0a2319104:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-13T17:20:56,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:20:56,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
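The recurring "Failed invocation" WARN above is HBase's lease-recovery helper polling the NameNode while closing a WAL writer. RecoverLeaseFSUtils looks isFileClosed up reflectively (the method is not part of the base FileSystem contract), so any IOException from the underlying client surfaces wrapped in java.lang.reflect.InvocationTargetException; here the cause is "Filesystem closed" because this cluster's DFSClient has already been shut down, and the Close-WAL-Writer-0 thread retries about once per second. A minimal sketch of that probe, assuming only the classes named in the trace (the real implementation in RecoverLeaseFSUtils differs in detail):

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class IsFileClosedProbe {
      // Returns true only when the NameNode reports the file closed. An
      // IOException from the underlying DFSClient (e.g. "Filesystem closed")
      // arrives wrapped in InvocationTargetException, which is exactly the
      // shape of the WARNs above.
      static boolean isFileClosed(FileSystem fs, Path p) {
        try {
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, p);
        } catch (NoSuchMethodException | IllegalAccessException e) {
          return false; // this FileSystem has no isFileClosed(Path)
        } catch (InvocationTargetException e) {
          return false; // logged as "Failed invocation for <path>", then retried
        }
      }
    }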
2024-12-13T17:20:56,759 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C36927%2C1734110414236.1734110456758
2024-12-13T17:20:56,770 DEBUG [Time-limited test {}] wal.TestLogRolling$2(324): preLogRoll: oldFile=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110444711 newFile=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110456758
2024-12-13T17:20:56,772 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110444711 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110456758
2024-12-13T17:20:56,772 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42071:42071),(127.0.0.1/127.0.0.1:36049:36049)]
2024-12-13T17:20:56,772 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110444711 is not closed yet, will try archiving it next time
2024-12-13T17:20:56,772 DEBUG [Time-limited test {}] wal.TestLogRolling(380): recovering lease for hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110414743
2024-12-13T17:20:56,772 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110414743
2024-12-13T17:20:56,772 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110414743 after 0ms
2024-12-13T17:20:56,773 DEBUG [Time-limited test {}] wal.TestLogRolling(384): Reading WAL /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110414743
2024-12-13T17:20:56,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40291 is added to blk_1073741841_1023 (size=1264)
2024-12-13T17:20:56,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39523 is added to blk_1073741841_1023 (size=1264)
2024-12-13T17:20:56,780 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1734110415676/Put/vlen=162/seqid=0]
2024-12-13T17:20:56,780 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #4: [default/info:d/1734110415714/Put/vlen=9/seqid=0]
2024-12-13T17:20:56,780 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #5: [hbase/info:d/1734110415812/Put/vlen=7/seqid=0]
2024-12-13T17:20:56,781 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1734110416379/Put/vlen=218/seqid=0]
2024-12-13T17:20:56,781 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #4: [row1002/info:/1734110426004/Put/vlen=1045/seqid=0]
2024-12-13T17:20:56,781 DEBUG [Time-limited test {}] wal.TestLogRolling(396): EOF reading file /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110414743
2024-12-13T17:20:56,781 DEBUG [Time-limited test {}] wal.TestLogRolling(380): recovering lease for hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110429299
2024-12-13T17:20:56,781 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110429299
2024-12-13T17:20:56,782 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110429299 after 1ms
2024-12-13T17:20:56,782 DEBUG [Time-limited test {}] wal.TestLogRolling(384): Reading WAL /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110429299
2024-12-13T17:20:56,785 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #6: [row1003/info:/1734110439407/Put/vlen=1045/seqid=0]
2024-12-13T17:20:56,785 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #7: [row1004/info:/1734110441415/Put/vlen=1045/seqid=0]
2024-12-13T17:20:56,786 DEBUG [Time-limited test {}] wal.TestLogRolling(396): EOF reading file /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110429299
2024-12-13T17:20:56,786 DEBUG [Time-limited test {}] wal.TestLogRolling(380): recovering lease for hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110444711
2024-12-13T17:20:56,786 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110444711
2024-12-13T17:20:56,786 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110444711 after 0ms
2024-12-13T17:20:56,786 DEBUG [Time-limited test {}] wal.TestLogRolling(384): Reading WAL /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110444711
2024-12-13T17:20:56,790 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #9: [row1005/info:/1734110454756/Put/vlen=1045/seqid=0]
2024-12-13T17:20:56,790 DEBUG [Time-limited test {}] wal.TestLogRolling(380): recovering lease for hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110456758
2024-12-13T17:20:56,790 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110456758
2024-12-13T17:20:56,790 WARN [IPC Server handler 3 on default port 42927 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110456758 has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741842_1025
2024-12-13T17:20:56,790 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110456758 after 0ms
[... the same "Failed invocation" WARN and identical stack trace from the first cluster repeats at 2024-12-13T17:20:56,853 for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 ...]
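The "Recover lease ... / Recovered lease, attempt=0" pairs above, and the one failure for ...1734110456758 (where the NameNode answers "Lease recovery is in progress. RecoveryId = 1026"), follow the usual HDFS pattern: DistributedFileSystem#recoverLease asks the NameNode to start (or check) recovery, returns a boolean immediately, and the caller re-polls until the file is closed. A sketch of that loop; the interval and attempt cap are illustrative, not the values the test uses:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    final class LeaseRecoveryLoop {
      static boolean recover(DistributedFileSystem dfs, Path wal) throws Exception {
        for (int attempt = 0; attempt < 60; attempt++) {
          // true once the file is closed; false while recovery is still in
          // progress (the "Failed to recover lease, attempt=N" case above).
          if (dfs.recoverLease(wal)) {
            return true;
          }
          Thread.sleep(1000L);
        }
        return false;
      }
    }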
2024-12-13T17:20:57,054 WARN [ResponseProcessor for block BP-602853586-172.17.0.2-1734110412509:blk_1073741842_1025 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-602853586-172.17.0.2-1734110412509:blk_1073741842_1025
java.io.EOFException: Unexpected EOF while trying to read response from server
	at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:20:57,054 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1737908575_22 at /127.0.0.1:55754 [Receiving block BP-602853586-172.17.0.2-1734110412509:blk_1073741842_1025] {}] datanode.DataXceiver(331): 127.0.0.1:40291:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55754 dst: /127.0.0.1:40291
java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:40291 remote=/127.0.0.1:55754]. Total timeout mills is 60000, 59716 millis timeout left.
	at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
	at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
	at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T17:20:57,054 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1737908575_22 at /127.0.0.1:56336 [Receiving block BP-602853586-172.17.0.2-1734110412509:blk_1073741842_1025] {}] datanode.DataXceiver(331): 127.0.0.1:39523:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56336 dst: /127.0.0.1:39523
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-13T17:20:57,054 WARN [DataStreamer for file /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110456758 block BP-602853586-172.17.0.2-1734110412509:blk_1073741842_1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-602853586-172.17.0.2-1734110412509:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40291,DS-cbbe8f95-9bca-4b53-94b2-db13fa46e4ac,DISK], DatanodeInfoWithStorage[127.0.0.1:39523,DS-00efdfd4-455c-42c6-87d0-80262b2540ef,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40291,DS-cbbe8f95-9bca-4b53-94b2-db13fa46e4ac,DISK]) is bad.
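The EOF on the ack stream and the two DataXceiver errors above are the datanode-side view of the pipeline for blk_1073741842_1025 being torn down while lease recovery is pending; the client then marks datanode 127.0.0.1:40291 as bad and attempts pipeline recovery. With only two datanodes, whether the client tries to replace the bad node is governed by its replace-datanode-on-failure settings; the keys below are the real HDFS client properties, but the values chosen are illustrative, not this test's configuration:

    import org.apache.hadoop.conf.Configuration;

    final class PipelineFailureConf {
      static Configuration make() {
        Configuration conf = new Configuration();
        // Whether to attempt replacing a failed datanode at all (default: true).
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        // DEFAULT only replaces nodes for wide pipelines; with replication 2
        // there is often no spare node in a mini cluster, so NEVER keeps the
        // write going on the surviving node instead of failing the stream.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
        return conf;
      }
    }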
2024-12-13T17:20:57,057 WARN [DataStreamer for file /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110456758 block BP-602853586-172.17.0.2-1734110412509:blk_1073741842_1025 {}] hdfs.DataStreamer(859): DataStreamer Exception
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-602853586-172.17.0.2-1734110412509:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980)
	at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182)
	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
	at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
	at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
	at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
	at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?]
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
	at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
[... the preceding five frames (reflection, HFileSystem$1.invoke, $Proxy47.updateBlockForPipeline) repeat five more times ...]
	at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
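The RemoteException "Unexpected BlockUCState: ... is UNDER_RECOVERY but not UNDER_CONSTRUCTION" is the two recovery paths colliding: the NameNode had already moved blk_1073741842_1025 into lease recovery (RecoveryId = 1026, requested above) when the original writer's DataStreamer asked to bump the block's generation stamp for its own pipeline recovery, so the writer loses the race. A hypothetical, self-contained way to provoke the same collision on a MiniDFSCluster (illustrative only; the test reaches this state through WAL rolls rather than this exact sequence):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    final class LeaseRecoveryRace {
      public static void main(String[] args) throws Exception {
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(new Configuration()).numDataNodes(2).build();
        try {
          DistributedFileSystem dfs = cluster.getFileSystem();
          Path p = new Path("/wal-under-test");
          FSDataOutputStream out = dfs.create(p, (short) 2);
          out.write(new byte[85]);
          out.hflush(); // block is now UNDER_CONSTRUCTION on both datanodes

          dfs.recoverLease(p); // NameNode moves the block to UNDER_RECOVERY

          try {
            // The evicted writer's subsequent stream operations now fail; in
            // the log above the failure surfaces in updateBlockForPipeline
            // during the writer's own pipeline recovery.
            out.close();
          } catch (Exception expected) {
            System.out.println("writer lost the race: " + expected);
          }
        } finally {
          cluster.shutdown();
        }
      }
    }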
2024-12-13T17:20:57,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40291 is added to blk_1073741842_1026 (size=85)
2024-12-13T17:20:57,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:20:57,722 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:20:57,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:20:58,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:20:58,723 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:20:58,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:20:59,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:20:59,723 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:20:59,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:21:00,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:21:00,724 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:21:00,792 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110456758 after 4001ms
2024-12-13T17:21:00,792 DEBUG [Time-limited test {}] wal.TestLogRolling(384): Reading WAL /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110456758
2024-12-13T17:21:00,801 DEBUG [Time-limited test {}] wal.TestLogRolling(396): EOF reading file /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110456758
2024-12-13T17:21:00,802 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 42c63207f2ed324ce48f0b276a67934b 1/1 column families, dataSize=78 B heapSize=488 B
2024-12-13T17:21:00,802 WARN [RS:0;bcc0a2319104:36927.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=7, requesting roll of WAL
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-602853586-172.17.0.2-1734110412509:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:21:00,803 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog bcc0a2319104%2C36927%2C1734110414236:(num 1734110456758) roll requested
2024-12-13T17:21:00,803 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 42c63207f2ed324ce48f0b276a67934b:
2024-12-13T17:21:00,803 INFO [regionserver/bcc0a2319104:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C36927%2C1734110414236.1734110460803
2024-12-13T17:21:00,803 INFO [Time-limited test {}] wal.TestLogRolling(416): org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=7, requesting roll of WAL
org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=7, requesting roll of WAL
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?]
    at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?]
    at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-602853586-172.17.0.2-1734110412509:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:21:00,804 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.90 KB heapSize=5.42 KB
2024-12-13T17:21:00,804 WARN [RS_OPEN_META-regionserver/bcc0a2319104:0-0.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=15, requesting roll of WAL
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40907,DS-cbbe8f95-9bca-4b53-94b2-db13fa46e4ac,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:21:00,804 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 1588230740:
2024-12-13T17:21:00,804 INFO [Time-limited test {}] wal.TestLogRolling(416): org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL
org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?]
    at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?]
    at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40907,DS-cbbe8f95-9bca-4b53-94b2-db13fa46e4ac,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:21:00,804 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing d2462ad1dd5771b17235db5ed67f620e 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB
2024-12-13T17:21:00,805 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for d2462ad1dd5771b17235db5ed67f620e:
2024-12-13T17:21:00,805 INFO [Time-limited test {}] wal.TestLogRolling(416): org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=7, requesting roll of WAL
org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=7, requesting roll of WAL
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?]
    at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?]
    at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-602853586-172.17.0.2-1734110412509:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:21:00,807 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster
2024-12-13T17:21:00,808 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-13T17:21:00,808 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x45939a2d to 127.0.0.1:57192
2024-12-13T17:21:00,808 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-13T17:21:00,808 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-12-13T17:21:00,808 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1385662243, stopped=false
2024-12-13T17:21:00,808 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=bcc0a2319104,38535,1734110414089
2024-12-13T17:21:00,809 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.TestLogRolling$2(324): preLogRoll: oldFile=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110456758 newFile=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110460803
2024-12-13T17:21:00,809 WARN [regionserver/bcc0a2319104:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WAL
org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=7, requesting roll of WAL
2024-12-13T17:21:00,809 INFO [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110456758 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110460803
2024-12-13T17:21:00,809 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42071:42071),(127.0.0.1/127.0.0.1:36049:36049)]
2024-12-13T17:21:00,810 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110456758 is not closed yet, will try archiving it next time
2024-12-13T17:21:00,810 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog bcc0a2319104%2C36927%2C1734110414236.meta:.meta(num 1734110415193) roll requested
2024-12-13T17:21:00,810 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-602853586-172.17.0.2-1734110412509:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-13T17:21:00,810 INFO [regionserver/bcc0a2319104:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C36927%2C1734110414236.meta.1734110460810.meta
2024-12-13T17:21:00,810 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed.
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-602853586-172.17.0.2-1734110412509:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-13T17:21:00,810 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110456758 2024-12-13T17:21:00,811 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110456758 after 1ms 2024-12-13T17:21:00,811 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.1734110456758 to hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/oldWALs/bcc0a2319104%2C36927%2C1734110414236.1734110456758 2024-12-13T17:21:00,815 WARN [regionserver/bcc0a2319104:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL 2024-12-13T17:21:00,815 INFO [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.meta.1734110415193.meta with entries=11, filesize=3.66 KB; new WAL /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.meta.1734110460810.meta 2024-12-13T17:21:00,815 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36049:36049),(127.0.0.1/127.0.0.1:42071:42071)] 2024-12-13T17:21:00,815 DEBUG [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.meta.1734110415193.meta is not closed yet, will try archiving it next time 2024-12-13T17:21:00,815 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40907,DS-cbbe8f95-9bca-4b53-94b2-db13fa46e4ac,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-13T17:21:00,815 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40907,DS-cbbe8f95-9bca-4b53-94b2-db13fa46e4ac,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-13T17:21:00,815 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.meta.1734110415193.meta 2024-12-13T17:21:00,816 WARN [IPC Server handler 4 on default port 42927 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.meta.1734110415193.meta has not been closed. Lease recovery is in progress. RecoveryId = 1029 for block blk_1073741834_1017 2024-12-13T17:21:00,816 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.meta.1734110415193.meta after 1ms 2024-12-13T17:21:00,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:00,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-13T17:21:00,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36927-0x100206821760001, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-13T17:21:00,885 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-13T17:21:00,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:21:00,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36927-0x100206821760001, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:21:00,886 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T17:21:00,886 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'bcc0a2319104,36927,1734110414236' ***** 2024-12-13T17:21:00,886 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-13T17:21:00,886 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36927-0x100206821760001, quorum=127.0.0.1:57192, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T17:21:00,886 INFO [RS:0;bcc0a2319104:36927 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-13T17:21:00,886 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T17:21:00,886 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-13T17:21:00,886 INFO [RS:0;bcc0a2319104:36927 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-13T17:21:00,886 INFO [RS:0;bcc0a2319104:36927 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-13T17:21:00,886 INFO [RS:0;bcc0a2319104:36927 {}] regionserver.HRegionServer(3579): Received CLOSE for 42c63207f2ed324ce48f0b276a67934b 2024-12-13T17:21:00,887 INFO [RS:0;bcc0a2319104:36927 {}] regionserver.HRegionServer(3579): Received CLOSE for d2462ad1dd5771b17235db5ed67f620e 2024-12-13T17:21:00,887 INFO [RS:0;bcc0a2319104:36927 {}] regionserver.HRegionServer(1224): stopping server bcc0a2319104,36927,1734110414236 2024-12-13T17:21:00,887 DEBUG [RS:0;bcc0a2319104:36927 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T17:21:00,887 INFO [RS:0;bcc0a2319104:36927 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-12-13T17:21:00,887 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 42c63207f2ed324ce48f0b276a67934b, disabling compactions & flushes 2024-12-13T17:21:00,887 INFO [RS:0;bcc0a2319104:36927 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-13T17:21:00,887 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734110415291.42c63207f2ed324ce48f0b276a67934b. 2024-12-13T17:21:00,887 INFO [RS:0;bcc0a2319104:36927 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-13T17:21:00,887 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734110415291.42c63207f2ed324ce48f0b276a67934b. 2024-12-13T17:21:00,887 INFO [RS:0;bcc0a2319104:36927 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-13T17:21:00,887 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734110415291.42c63207f2ed324ce48f0b276a67934b. after waiting 0 ms 2024-12-13T17:21:00,887 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734110415291.42c63207f2ed324ce48f0b276a67934b. 2024-12-13T17:21:00,887 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 42c63207f2ed324ce48f0b276a67934b 1/1 column families, dataSize=78 B heapSize=728 B 2024-12-13T17:21:00,887 INFO [RS:0;bcc0a2319104:36927 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close 2024-12-13T17:21:00,887 DEBUG [RS:0;bcc0a2319104:36927 {}] regionserver.HRegionServer(1603): Online Regions={42c63207f2ed324ce48f0b276a67934b=hbase:namespace,,1734110415291.42c63207f2ed324ce48f0b276a67934b., 1588230740=hbase:meta,,1.1588230740, d2462ad1dd5771b17235db5ed67f620e=TestLogRolling-testLogRollOnPipelineRestart,,1734110415992.d2462ad1dd5771b17235db5ed67f620e.} 2024-12-13T17:21:00,887 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-13T17:21:00,887 WARN [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultMemStore(92): Snapshot called again without clearing previous. Doing nothing. Another ongoing flush or did we fail last attempt? 
2024-12-13T17:21:00,887 DEBUG [RS:0;bcc0a2319104:36927 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 42c63207f2ed324ce48f0b276a67934b, d2462ad1dd5771b17235db5ed67f620e 2024-12-13T17:21:00,887 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-13T17:21:00,887 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-13T17:21:00,887 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-13T17:21:00,887 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-13T17:21:00,887 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.90 KB heapSize=5.89 KB 2024-12-13T17:21:00,888 WARN [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultMemStore(92): Snapshot called again without clearing previous. Doing nothing. Another ongoing flush or did we fail last attempt? 2024-12-13T17:21:00,888 WARN [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultMemStore(92): Snapshot called again without clearing previous. Doing nothing. Another ongoing flush or did we fail last attempt? 2024-12-13T17:21:00,906 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/hbase/namespace/42c63207f2ed324ce48f0b276a67934b/.tmp/info/6bfeb1de50a64fef8782c7ed68ca762b is 45, key is default/info:d/1734110415714/Put/seqid=0 2024-12-13T17:21:00,907 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/hbase/meta/1588230740/.tmp/info/ec768243e2424915bdfdcaf468d1e037 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1734110415992.d2462ad1dd5771b17235db5ed67f620e./info:regioninfo/1734110416384/Put/seqid=0 2024-12-13T17:21:00,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39523 is added to blk_1073741845_1030 (size=5037) 2024-12-13T17:21:00,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40291 is added to blk_1073741845_1030 (size=5037) 2024-12-13T17:21:00,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39523 is added to blk_1073741846_1031 (size=8268) 2024-12-13T17:21:00,912 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/hbase/namespace/42c63207f2ed324ce48f0b276a67934b/.tmp/info/6bfeb1de50a64fef8782c7ed68ca762b 2024-12-13T17:21:00,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40291 is added to blk_1073741846_1031 (size=8268)
2024-12-13T17:21:00,912 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.66 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/hbase/meta/1588230740/.tmp/info/ec768243e2424915bdfdcaf468d1e037 2024-12-13T17:21:00,919 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/hbase/namespace/42c63207f2ed324ce48f0b276a67934b/.tmp/info/6bfeb1de50a64fef8782c7ed68ca762b as hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/hbase/namespace/42c63207f2ed324ce48f0b276a67934b/info/6bfeb1de50a64fef8782c7ed68ca762b 2024-12-13T17:21:00,925 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/hbase/namespace/42c63207f2ed324ce48f0b276a67934b/info/6bfeb1de50a64fef8782c7ed68ca762b, entries=2, sequenceid=8, filesize=4.9 K 2024-12-13T17:21:00,926 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 42c63207f2ed324ce48f0b276a67934b in 39ms, sequenceid=8, compaction requested=false 2024-12-13T17:21:00,930 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/hbase/namespace/42c63207f2ed324ce48f0b276a67934b/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-12-13T17:21:00,931 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1734110415291.42c63207f2ed324ce48f0b276a67934b. 2024-12-13T17:21:00,931 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 42c63207f2ed324ce48f0b276a67934b: 2024-12-13T17:21:00,931 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1734110415291.42c63207f2ed324ce48f0b276a67934b. 2024-12-13T17:21:00,931 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing d2462ad1dd5771b17235db5ed67f620e, disabling compactions & flushes 2024-12-13T17:21:00,931 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1734110415992.d2462ad1dd5771b17235db5ed67f620e. 2024-12-13T17:21:00,931 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1734110415992.d2462ad1dd5771b17235db5ed67f620e.
2024-12-13T17:21:00,931 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1734110415992.d2462ad1dd5771b17235db5ed67f620e. after waiting 0 ms 2024-12-13T17:21:00,931 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1734110415992.d2462ad1dd5771b17235db5ed67f620e. 2024-12-13T17:21:00,932 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing d2462ad1dd5771b17235db5ed67f620e 1/1 column families, dataSize=4.20 KB heapSize=4.98 KB 2024-12-13T17:21:00,932 WARN [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultMemStore(92): Snapshot called again without clearing previous. Doing nothing. Another ongoing flush or did we fail last attempt? 2024-12-13T17:21:00,934 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/hbase/meta/1588230740/.tmp/table/4a0f5e3ead7046d2b42cb571e09fdfd6 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1734110416397/Put/seqid=0 2024-12-13T17:21:00,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39523 is added to blk_1073741847_1032 (size=5482) 2024-12-13T17:21:00,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40291 is added to blk_1073741847_1032 (size=5482) 2024-12-13T17:21:00,948 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=244 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/hbase/meta/1588230740/.tmp/table/4a0f5e3ead7046d2b42cb571e09fdfd6 2024-12-13T17:21:00,954 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/hbase/meta/1588230740/.tmp/info/ec768243e2424915bdfdcaf468d1e037 as hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/hbase/meta/1588230740/info/ec768243e2424915bdfdcaf468d1e037 2024-12-13T17:21:00,955 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/default/TestLogRolling-testLogRollOnPipelineRestart/d2462ad1dd5771b17235db5ed67f620e/.tmp/info/7b3f59a6913a4f099f8c1a3ba0a5bd63 is 1080, key is row1002/info:/1734110426004/Put/seqid=0 2024-12-13T17:21:00,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39523 is added to blk_1073741848_1033 (size=9270) 2024-12-13T17:21:00,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40291 is added to blk_1073741848_1033 (size=9270) 2024-12-13T17:21:00,960 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/default/TestLogRolling-testLogRollOnPipelineRestart/d2462ad1dd5771b17235db5ed67f620e/.tmp/info/7b3f59a6913a4f099f8c1a3ba0a5bd63
2024-12-13T17:21:00,961 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/hbase/meta/1588230740/info/ec768243e2424915bdfdcaf468d1e037, entries=20, sequenceid=16, filesize=8.1 K 2024-12-13T17:21:00,962 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/hbase/meta/1588230740/.tmp/table/4a0f5e3ead7046d2b42cb571e09fdfd6 as hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/hbase/meta/1588230740/table/4a0f5e3ead7046d2b42cb571e09fdfd6 2024-12-13T17:21:00,967 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/default/TestLogRolling-testLogRollOnPipelineRestart/d2462ad1dd5771b17235db5ed67f620e/.tmp/info/7b3f59a6913a4f099f8c1a3ba0a5bd63 as hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/default/TestLogRolling-testLogRollOnPipelineRestart/d2462ad1dd5771b17235db5ed67f620e/info/7b3f59a6913a4f099f8c1a3ba0a5bd63 2024-12-13T17:21:00,968 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/hbase/meta/1588230740/table/4a0f5e3ead7046d2b42cb571e09fdfd6, entries=4, sequenceid=16, filesize=5.4 K 2024-12-13T17:21:00,969 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~2.90 KB/2972, heapSize ~5.14 KB/5264, currentSize=0 B/0 for 1588230740 in 82ms, sequenceid=16, compaction requested=false 2024-12-13T17:21:00,973 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/default/TestLogRolling-testLogRollOnPipelineRestart/d2462ad1dd5771b17235db5ed67f620e/info/7b3f59a6913a4f099f8c1a3ba0a5bd63, entries=4, sequenceid=12, filesize=9.1 K 2024-12-13T17:21:00,974 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/hbase/meta/1588230740/recovered.edits/19.seqid, newMaxSeqId=19, maxSeqId=1 2024-12-13T17:21:00,974 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-13T17:21:00,974 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for d2462ad1dd5771b17235db5ed67f620e in 43ms, sequenceid=12, compaction requested=false
2024-12-13T17:21:00,974 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-13T17:21:00,974 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-13T17:21:00,974 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-13T17:21:00,978 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/data/default/TestLogRolling-testLogRollOnPipelineRestart/d2462ad1dd5771b17235db5ed67f620e/recovered.edits/15.seqid, newMaxSeqId=15, maxSeqId=1 2024-12-13T17:21:00,979 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRollOnPipelineRestart,,1734110415992.d2462ad1dd5771b17235db5ed67f620e. 2024-12-13T17:21:00,979 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for d2462ad1dd5771b17235db5ed67f620e: 2024-12-13T17:21:00,979 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1734110415992.d2462ad1dd5771b17235db5ed67f620e. 2024-12-13T17:21:01,087 INFO [RS:0;bcc0a2319104:36927 {}] regionserver.HRegionServer(1250): stopping server bcc0a2319104,36927,1734110414236; all regions closed. 2024-12-13T17:21:01,088 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236 2024-12-13T17:21:01,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40291 is added to blk_1073741844_1028 (size=761) 2024-12-13T17:21:01,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39523 is added to blk_1073741844_1028 (size=761) 2024-12-13T17:21:01,650 INFO [regionserver/bcc0a2319104:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-13T17:21:01,650 INFO [regionserver/bcc0a2319104:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-13T17:21:01,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:01,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-12-13T17:21:01,857 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:02,036 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1017: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?]
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-13T17:21:02,612 INFO [regionserver/bcc0a2319104:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-13T17:21:02,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:02,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:02,857 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-12-13T17:21:03,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:03,726 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:03,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-12-13T17:21:04,019 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-13T17:21:04,020 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-13T17:21:04,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:04,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta after 68072ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor198.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:21:04,727 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:21:04,817 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.meta.1734110415193.meta after 4002ms 2024-12-13T17:21:04,817 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236/bcc0a2319104%2C36927%2C1734110414236.meta.1734110415193.meta to hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/oldWALs/bcc0a2319104%2C36927%2C1734110414236.meta.1734110415193.meta 2024-12-13T17:21:04,823 DEBUG [RS:0;bcc0a2319104:36927 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/oldWALs 2024-12-13T17:21:04,823 INFO [RS:0;bcc0a2319104:36927 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog bcc0a2319104%2C36927%2C1734110414236.meta:.meta(num 1734110460810) 2024-12-13T17:21:04,828 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/WALs/bcc0a2319104,36927,1734110414236 2024-12-13T17:21:04,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39523 is added to blk_1073741843_1027 (size=1979) 2024-12-13T17:21:04,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40291 is added to blk_1073741843_1027 (size=1979) 2024-12-13T17:21:04,851 DEBUG [RS:0;bcc0a2319104:36927 {}] wal.AbstractFSWAL(1071): Moved 4 WAL file(s) to /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/oldWALs 2024-12-13T17:21:04,851 INFO [RS:0;bcc0a2319104:36927 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog bcc0a2319104%2C36927%2C1734110414236:(num 1734110460803) 2024-12-13T17:21:04,851 DEBUG [RS:0;bcc0a2319104:36927 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T17:21:04,851 INFO [RS:0;bcc0a2319104:36927 {}] regionserver.LeaseManager(133): Closed leases 2024-12-13T17:21:04,852 INFO [RS:0;bcc0a2319104:36927 {}] hbase.ChoreService(370): Chore service for: regionserver/bcc0a2319104:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-13T17:21:04,852 INFO [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-13T17:21:04,852 INFO [RS:0;bcc0a2319104:36927 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:36927 2024-12-13T17:21:04,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:04,862 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-13T17:21:04,862 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36927-0x100206821760001, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/bcc0a2319104,36927,1734110414236 2024-12-13T17:21:04,864 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [bcc0a2319104,36927,1734110414236] 2024-12-13T17:21:04,864 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing bcc0a2319104,36927,1734110414236; numProcessing=1 2024-12-13T17:21:04,879 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/bcc0a2319104,36927,1734110414236 already deleted, retry=false 2024-12-13T17:21:04,879 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; bcc0a2319104,36927,1734110414236 expired; onlineServers=0 2024-12-13T17:21:04,879 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'bcc0a2319104,38535,1734110414089' ***** 2024-12-13T17:21:04,879 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-13T17:21:04,880 DEBUG [M:0;bcc0a2319104:38535 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2862a5b0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bcc0a2319104/172.17.0.2:0 2024-12-13T17:21:04,880 INFO [M:0;bcc0a2319104:38535 {}] regionserver.HRegionServer(1224): stopping server bcc0a2319104,38535,1734110414089 2024-12-13T17:21:04,880 INFO [M:0;bcc0a2319104:38535 {}] regionserver.HRegionServer(1250): stopping 
server bcc0a2319104,38535,1734110414089; all regions closed. 2024-12-13T17:21:04,880 DEBUG [M:0;bcc0a2319104:38535 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T17:21:04,880 DEBUG [M:0;bcc0a2319104:38535 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-13T17:21:04,880 DEBUG [M:0;bcc0a2319104:38535 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-13T17:21:04,880 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-13T17:21:04,880 DEBUG [master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.large.0-1734110414527 {}] cleaner.HFileCleaner(306): Exit Thread[master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.large.0-1734110414527,5,FailOnTimeoutGroup] 2024-12-13T17:21:04,880 DEBUG [master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.small.0-1734110414527 {}] cleaner.HFileCleaner(306): Exit Thread[master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.small.0-1734110414527,5,FailOnTimeoutGroup] 2024-12-13T17:21:04,880 INFO [M:0;bcc0a2319104:38535 {}] hbase.ChoreService(370): Chore service for: master/bcc0a2319104:0 had [] on shutdown 2024-12-13T17:21:04,880 DEBUG [M:0;bcc0a2319104:38535 {}] master.HMaster(1733): Stopping service threads 2024-12-13T17:21:04,880 INFO [M:0;bcc0a2319104:38535 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-13T17:21:04,881 INFO [M:0;bcc0a2319104:38535 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-13T17:21:04,881 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-13T17:21:04,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-13T17:21:04,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:21:04,952 DEBUG [M:0;bcc0a2319104:38535 {}] zookeeper.ZKUtil(347): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-13T17:21:04,952 WARN [M:0;bcc0a2319104:38535 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-13T17:21:04,952 INFO [M:0;bcc0a2319104:38535 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-13T17:21:04,952 INFO [M:0;bcc0a2319104:38535 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-13T17:21:04,953 DEBUG [M:0;bcc0a2319104:38535 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-13T17:21:04,953 INFO [M:0;bcc0a2319104:38535 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-13T17:21:04,953 DEBUG [M:0;bcc0a2319104:38535 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-13T17:21:04,953 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-13T17:21:04,953 DEBUG [M:0;bcc0a2319104:38535 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-13T17:21:04,953 DEBUG [M:0;bcc0a2319104:38535 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-13T17:21:04,953 INFO [M:0;bcc0a2319104:38535 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=40.09 KB heapSize=49.26 KB
2024-12-13T17:21:04,972 INFO [RS:0;bcc0a2319104:36927 {}] regionserver.HRegionServer(1307): Exiting; stopping=bcc0a2319104,36927,1734110414236; zookeeper connection closed.
2024-12-13T17:21:04,972 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36927-0x100206821760001, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-13T17:21:04,972 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36927-0x100206821760001, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-13T17:21:04,974 DEBUG [M:0;bcc0a2319104:38535 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/258f2c081f644fe88dabff5923293373 is 82, key is hbase:meta,,1/info:regioninfo/1734110415216/Put/seqid=0
2024-12-13T17:21:04,979 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6f45b5d9 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6f45b5d9
2024-12-13T17:21:04,979 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-13T17:21:05,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40291 is added to blk_1073741849_1034 (size=5672)
2024-12-13T17:21:05,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39523 is added to blk_1073741849_1034 (size=5672)
2024-12-13T17:21:05,001 INFO [M:0;bcc0a2319104:38535 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/258f2c081f644fe88dabff5923293373
2024-12-13T17:21:05,044 DEBUG [M:0;bcc0a2319104:38535 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/eb786c796b4245f0a9ec8396b282779c is 778, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1734110416448/Put/seqid=0
2024-12-13T17:21:05,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39523 is added to blk_1073741850_1035 (size=7469)
2024-12-13T17:21:05,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40291 is added to blk_1073741850_1035 (size=7469)
2024-12-13T17:21:05,066 INFO [M:0;bcc0a2319104:38535 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=39.49 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/eb786c796b4245f0a9ec8396b282779c
2024-12-13T17:21:05,098 DEBUG [M:0;bcc0a2319104:38535 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/90d7ae2c39604a2cb5f7c43b759364b6 is 69, key is bcc0a2319104,36927,1734110414236/rs:state/1734110414590/Put/seqid=0
2024-12-13T17:21:05,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40291 is added to blk_1073741851_1036 (size=5156)
2024-12-13T17:21:05,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39523 is added to blk_1073741851_1036 (size=5156)
2024-12-13T17:21:05,121 INFO [M:0;bcc0a2319104:38535 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/90d7ae2c39604a2cb5f7c43b759364b6
2024-12-13T17:21:05,153 DEBUG [M:0;bcc0a2319104:38535 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/959f2572fcc64c3db4dc81dca6fcfef5 is 52, key is load_balancer_on/state:d/1734110415984/Put/seqid=0
2024-12-13T17:21:05,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40291 is added to blk_1073741852_1037 (size=5056)
2024-12-13T17:21:05,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39523 is added to blk_1073741852_1037 (size=5056)
2024-12-13T17:21:05,169 INFO [M:0;bcc0a2319104:38535 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/959f2572fcc64c3db4dc81dca6fcfef5
2024-12-13T17:21:05,179 DEBUG [M:0;bcc0a2319104:38535 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/258f2c081f644fe88dabff5923293373 as hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/258f2c081f644fe88dabff5923293373
2024-12-13T17:21:05,194 INFO [M:0;bcc0a2319104:38535 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/258f2c081f644fe88dabff5923293373, entries=8, sequenceid=96, filesize=5.5 K
2024-12-13T17:21:05,196 DEBUG [M:0;bcc0a2319104:38535 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/eb786c796b4245f0a9ec8396b282779c as hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/eb786c796b4245f0a9ec8396b282779c
2024-12-13T17:21:05,204 INFO [M:0;bcc0a2319104:38535 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/eb786c796b4245f0a9ec8396b282779c, entries=11, sequenceid=96, filesize=7.3 K
2024-12-13T17:21:05,205 DEBUG [M:0;bcc0a2319104:38535 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/90d7ae2c39604a2cb5f7c43b759364b6 as hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/90d7ae2c39604a2cb5f7c43b759364b6
2024-12-13T17:21:05,213 INFO [M:0;bcc0a2319104:38535 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/90d7ae2c39604a2cb5f7c43b759364b6, entries=1, sequenceid=96, filesize=5.0 K
2024-12-13T17:21:05,220 DEBUG [M:0;bcc0a2319104:38535 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/959f2572fcc64c3db4dc81dca6fcfef5 as hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/959f2572fcc64c3db4dc81dca6fcfef5
2024-12-13T17:21:05,228 INFO [M:0;bcc0a2319104:38535 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42927/user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/959f2572fcc64c3db4dc81dca6fcfef5, entries=1, sequenceid=96, filesize=4.9 K
2024-12-13T17:21:05,229 INFO [M:0;bcc0a2319104:38535 {}] regionserver.HRegion(3040): Finished flush of dataSize ~40.09 KB/41052, heapSize ~49.20 KB/50376, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 276ms, sequenceid=96, compaction requested=false
2024-12-13T17:21:05,268 INFO [M:0;bcc0a2319104:38535 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-13T17:21:05,268 DEBUG [M:0;bcc0a2319104:38535 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-13T17:21:05,268 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/b617f244-1bf1-0830-e11c-1c3b6dfcb9c1/MasterData/WALs/bcc0a2319104,38535,1734110414089
2024-12-13T17:21:05,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40291 is added to blk_1073741840_1021 (size=757)
2024-12-13T17:21:05,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39523 is added to blk_1073741840_1021 (size=757)
2024-12-13T17:21:05,272 INFO [M:0;bcc0a2319104:38535 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-12-13T17:21:05,272 INFO [M:0;bcc0a2319104:38535 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:38535
2024-12-13T17:21:05,272 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-13T17:21:05,279 DEBUG [M:0;bcc0a2319104:38535 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/bcc0a2319104,38535,1734110414089 already deleted, retry=false
2024-12-13T17:21:05,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-13T17:21:05,388 INFO [M:0;bcc0a2319104:38535 {}] regionserver.HRegionServer(1307): Exiting; stopping=bcc0a2319104,38535,1734110414089; zookeeper connection closed.
2024-12-13T17:21:05,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38535-0x100206821760000, quorum=127.0.0.1:57192, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-13T17:21:05,436 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@789f09e2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-13T17:21:05,437 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@766df515{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-13T17:21:05,437 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-13T17:21:05,437 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d0ae2f8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-13T17:21:05,437 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@584aabf9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/hadoop.log.dir/,STOPPED}
2024-12-13T17:21:05,441 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-13T17:21:05,441 WARN [BP-602853586-172.17.0.2-1734110412509 heartbeating to localhost/127.0.0.1:42927 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-13T17:21:05,441 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-13T17:21:05,441 WARN [BP-602853586-172.17.0.2-1734110412509 heartbeating to localhost/127.0.0.1:42927 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-602853586-172.17.0.2-1734110412509 (Datanode Uuid cd7ef08b-e07d-4d0d-8298-8bc1e8733972) service to localhost/127.0.0.1:42927
2024-12-13T17:21:05,442 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/cluster_1bbe54eb-58a5-af1a-0866-29993c667105/dfs/data/data3/current/BP-602853586-172.17.0.2-1734110412509 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T17:21:05,442 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/cluster_1bbe54eb-58a5-af1a-0866-29993c667105/dfs/data/data4/current/BP-602853586-172.17.0.2-1734110412509 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T17:21:05,442 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-13T17:21:05,460 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@78073245{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-13T17:21:05,461 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@84fd095{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-13T17:21:05,461 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-13T17:21:05,461 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7d0cde33{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-13T17:21:05,461 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30a29a4e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/hadoop.log.dir/,STOPPED}
2024-12-13T17:21:05,463 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-13T17:21:05,463 WARN [BP-602853586-172.17.0.2-1734110412509 heartbeating to localhost/127.0.0.1:42927 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-13T17:21:05,463 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-13T17:21:05,463 WARN [BP-602853586-172.17.0.2-1734110412509 heartbeating to localhost/127.0.0.1:42927 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-602853586-172.17.0.2-1734110412509 (Datanode Uuid ebc35a20-901e-4bc3-a0b9-a60a36b7f088) service to localhost/127.0.0.1:42927 2024-12-13T17:21:05,464 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/cluster_1bbe54eb-58a5-af1a-0866-29993c667105/dfs/data/data1/current/BP-602853586-172.17.0.2-1734110412509 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-13T17:21:05,464 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/cluster_1bbe54eb-58a5-af1a-0866-29993c667105/dfs/data/data2/current/BP-602853586-172.17.0.2-1734110412509 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-13T17:21:05,464 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-13T17:21:05,472 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2be233f1{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-13T17:21:05,474 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6a2091a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-13T17:21:05,474 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-13T17:21:05,474 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@37b4f391{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-13T17:21:05,474 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@13ad263a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/hadoop.log.dir/,STOPPED} 2024-12-13T17:21:05,483 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-13T17:21:05,511 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-13T17:21:05,519 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=99 (was 86) Potentially hanging thread: IPC Client (1140069844) connection to localhost/127.0.0.1:42927 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: 
nioEventLoopGroup-27-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42927 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-29-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-26-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-28-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42927 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1140069844) connection to localhost/127.0.0.1:42927 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-27-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-29-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-29-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-26-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42927 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-27-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1140069844) connection to localhost/127.0.0.1:42927 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:42927 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-28-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-28-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-9-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42927 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-26-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 410) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=117 (was 114) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3485 (was 3706) 2024-12-13T17:21:05,526 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=99, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=117, ProcessCount=11, AvailableMemoryMB=3485 2024-12-13T17:21:05,527 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-13T17:21:05,527 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/hadoop.log.dir so I do NOT create it in target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3 2024-12-13T17:21:05,527 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/hadoop.tmp.dir so I do NOT create it in target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3 2024-12-13T17:21:05,527 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/cluster_b3d8cb9d-656a-e795-527f-cec2f0ae7763, deleteOnExit=true 2024-12-13T17:21:05,527 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-13T17:21:05,527 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/test.cache.data in system properties and HBase conf 2024-12-13T17:21:05,527 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/hadoop.tmp.dir in system properties and HBase conf 2024-12-13T17:21:05,527 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting 
2024-12-13T17:21:05,527 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/hadoop.log.dir so I do NOT create it in target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3
2024-12-13T17:21:05,527 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e0ba792a-00e1-aa21-3063-46906b14641b/hadoop.tmp.dir so I do NOT create it in target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3
2024-12-13T17:21:05,527 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/cluster_b3d8cb9d-656a-e795-527f-cec2f0ae7763, deleteOnExit=true
2024-12-13T17:21:05,527 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS
2024-12-13T17:21:05,527 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/test.cache.data in system properties and HBase conf
2024-12-13T17:21:05,527 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/hadoop.tmp.dir in system properties and HBase conf
2024-12-13T17:21:05,527 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/hadoop.log.dir in system properties and HBase conf
2024-12-13T17:21:05,527 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-13T17:21:05,527 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-13T17:21:05,527 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF
2024-12-13T17:21:05,528 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-13T17:21:05,528 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-13T17:21:05,528 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-13T17:21:05,528 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-13T17:21:05,528 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-13T17:21:05,528 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-13T17:21:05,528 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-13T17:21:05,528 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-13T17:21:05,528 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-13T17:21:05,528 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-13T17:21:05,528 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/nfs.dump.dir in system properties and HBase conf
2024-12-13T17:21:05,528 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/java.io.tmpdir in system properties and HBase conf
2024-12-13T17:21:05,528 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-13T17:21:05,528 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-13T17:21:05,528 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-13T17:21:05,539 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-13T17:21:05,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:21:05,728 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
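The traces above show RecoverLeaseFSUtils reflectively polling DFS isFileClosed between recoverLease attempts, failing here only because the test's DFSClient was already shut down ("Filesystem closed"). A minimal sketch of that recover-then-poll pattern against the public HDFS client API; the sleep interval is illustrative, where the real utility uses configurable timeouts:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoveryExample {
        // Try to take over the write lease on a WAL file left open by a dead writer.
        static void recoverLease(DistributedFileSystem dfs, Path wal) throws Exception {
            boolean recovered = dfs.recoverLease(wal); // ask the NameNode to start lease recovery
            while (!recovered && !dfs.isFileClosed(wal)) { // throws IOException if the client is closed
                Thread.sleep(1000); // poll until the NameNode finalizes the last block
                recovered = dfs.recoverLease(wal);
            }
        }
    }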
2024-12-13T17:21:05,831 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-13T17:21:05,837 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-13T17:21:05,842 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-13T17:21:05,842 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-13T17:21:05,842 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-13T17:21:05,842 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-13T17:21:05,843 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@64ec3f26{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/hadoop.log.dir/,AVAILABLE}
2024-12-13T17:21:05,843 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3334e3c4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-13T17:21:05,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:21:05,948 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:05,948 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:05,948 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:05,949 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:05,949 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:05,949 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:05,954 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:05,954 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:05,954 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:05,954 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@62f70d3c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/java.io.tmpdir/jetty-localhost-37557-hadoop-hdfs-3_4_1-tests_jar-_-any-11170256053241772956/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-13T17:21:05,956 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@54da9c94{HTTP/1.1, (http/1.1)}{localhost:37557}
2024-12-13T17:21:05,956 INFO [Time-limited test {}] server.Server(415): Started @238683ms
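The jetty-9.4.53 lines are the NameNode's embedded HTTP server binding an ephemeral port (localhost:37557 above). A minimal embedded Jetty 9 sketch of the same pattern, where port 0 means "pick any free port", as the test does:

    import org.eclipse.jetty.server.Server;
    import org.eclipse.jetty.server.ServerConnector;
    import org.eclipse.jetty.servlet.ServletContextHandler;

    public class EmbeddedJettyExample {
        public static void main(String[] args) throws Exception {
            Server server = new Server(0); // 0 = bind an ephemeral port, like localhost:37557 above
            ServletContextHandler context = new ServletContextHandler();
            context.setContextPath("/");
            server.setHandler(context);
            server.start();
            // The actual port is only known after start().
            int port = ((ServerConnector) server.getConnectors()[0]).getLocalPort();
            System.out.println("Started on port " + port);
            server.stop();
        }
    }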
2024-12-13T17:21:05,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:05,962 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:05,962 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:05,962 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:05,981 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-13T17:21:06,266 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-13T17:21:06,266 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:06,267 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:06,267 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:06,267 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:06,278 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:06,278 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:06,279 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:06,279 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:06,279 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:06,280 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:06,282 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:06,282 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:06,282 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:06,284 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
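The repeated metric-collection warning wraps a JDK "helpful NullPointerException" message (JEP 358, on by default since JDK 15): FsDatasetImpl's executors map is read by the metrics thread before it has been initialized during datanode startup. The exact message shape is easy to reproduce with any uninitialized field:

    import java.util.Map;

    public class HelpfulNpeExample {
        private Map<String, Object> executors; // never initialized, like the field in the warning

        void collect() {
            // JDK 15+ reports: Cannot invoke "java.util.Map.values()" because "this.executors" is null
            executors.values();
        }

        public static void main(String[] args) {
            try {
                new HelpfulNpeExample().collect();
            } catch (NullPointerException e) {
                System.out.println(e.getMessage());
            }
        }
    }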
2024-12-13T17:21:06,290 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-13T17:21:06,293 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-13T17:21:06,294 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-13T17:21:06,294 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-13T17:21:06,294 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-13T17:21:06,294 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56d95970{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/hadoop.log.dir/,AVAILABLE}
2024-12-13T17:21:06,294 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@72f6e6ee{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-13T17:21:06,382 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3a7e2505{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/java.io.tmpdir/jetty-localhost-46489-hadoop-hdfs-3_4_1-tests_jar-_-any-4033544056443977312/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-13T17:21:06,382 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3dc3f173{HTTP/1.1, (http/1.1)}{localhost:46489}
2024-12-13T17:21:06,382 INFO [Time-limited test {}] server.Server(415): Started @239109ms
2024-12-13T17:21:06,383 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-13T17:21:06,412 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-13T17:21:06,415 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-13T17:21:06,416 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-13T17:21:06,416 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-13T17:21:06,416 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-13T17:21:06,416 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7bc0a4a7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/hadoop.log.dir/,AVAILABLE}
2024-12-13T17:21:06,417 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a11cb82{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-13T17:21:06,508 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@554c1d71{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/java.io.tmpdir/jetty-localhost-40981-hadoop-hdfs-3_4_1-tests_jar-_-any-9975202152073271601/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-13T17:21:06,508 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@726e788f{HTTP/1.1, (http/1.1)}{localhost:40981}
2024-12-13T17:21:06,508 INFO [Time-limited test {}] server.Server(415): Started @239236ms
2024-12-13T17:21:06,510 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-13T17:21:06,687 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:21:06,728 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:21:06,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:21:07,171 WARN [Thread-1418 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/cluster_b3d8cb9d-656a-e795-527f-cec2f0ae7763/dfs/data/data1/current/BP-801480977-172.17.0.2-1734110465550/current, will proceed with Du for space computation calculation,
2024-12-13T17:21:07,172 WARN [Thread-1419 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/cluster_b3d8cb9d-656a-e795-527f-cec2f0ae7763/dfs/data/data2/current/BP-801480977-172.17.0.2-1734110465550/current, will proceed with Du for space computation calculation,
2024-12-13T17:21:07,196 WARN [Thread-1382 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
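The DirectoryScanner warning above means the configured throttle exceeds its 1000 ms/sec ceiling, so per the warning the datanode falls back to -1 (throttling disabled). A sketch of setting the key, taken verbatim from the warning, to a value the scanner would accept; purely illustrative:

    import org.apache.hadoop.conf.Configuration;

    public class DirectoryScannerConfigExample {
        public static void main(String[] args) {
            Configuration conf = new Configuration();
            // The scanner may spend at most this many ms of each second scanning;
            // anything above 1000 triggers the warning seen in the log.
            conf.setInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", 1000);
        }
    }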
2024-12-13T17:21:07,200 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6f7fbf8fc70663db with lease ID 0xd2e0d61a952055d: Processing first storage report for DS-8f4f6b01-cc3d-456d-be7e-18b524556c08 from datanode DatanodeRegistration(127.0.0.1:36517, datanodeUuid=48441be1-7355-41f3-b362-a1ea369ab017, infoPort=43705, infoSecurePort=0, ipcPort=43047, storageInfo=lv=-57;cid=testClusterID;nsid=1578684384;c=1734110465550)
2024-12-13T17:21:07,200 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6f7fbf8fc70663db with lease ID 0xd2e0d61a952055d: from storage DS-8f4f6b01-cc3d-456d-be7e-18b524556c08 node DatanodeRegistration(127.0.0.1:36517, datanodeUuid=48441be1-7355-41f3-b362-a1ea369ab017, infoPort=43705, infoSecurePort=0, ipcPort=43047, storageInfo=lv=-57;cid=testClusterID;nsid=1578684384;c=1734110465550), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-13T17:21:07,200 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6f7fbf8fc70663db with lease ID 0xd2e0d61a952055d: Processing first storage report for DS-df613eeb-7f8c-4ea6-890e-3770ab1b685c from datanode DatanodeRegistration(127.0.0.1:36517, datanodeUuid=48441be1-7355-41f3-b362-a1ea369ab017, infoPort=43705, infoSecurePort=0, ipcPort=43047, storageInfo=lv=-57;cid=testClusterID;nsid=1578684384;c=1734110465550)
2024-12-13T17:21:07,200 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6f7fbf8fc70663db with lease ID 0xd2e0d61a952055d: from storage DS-df613eeb-7f8c-4ea6-890e-3770ab1b685c node DatanodeRegistration(127.0.0.1:36517, datanodeUuid=48441be1-7355-41f3-b362-a1ea369ab017, infoPort=43705, infoSecurePort=0, ipcPort=43047, storageInfo=lv=-57;cid=testClusterID;nsid=1578684384;c=1734110465550), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-13T17:21:07,374 WARN [Thread-1429 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/cluster_b3d8cb9d-656a-e795-527f-cec2f0ae7763/dfs/data/data3/current/BP-801480977-172.17.0.2-1734110465550/current, will proceed with Du for space computation calculation,
2024-12-13T17:21:07,374 WARN [Thread-1430 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/cluster_b3d8cb9d-656a-e795-527f-cec2f0ae7763/dfs/data/data4/current/BP-801480977-172.17.0.2-1734110465550/current, will proceed with Du for space computation calculation,
2024-12-13T17:21:07,388 WARN [Thread-1405 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-13T17:21:07,390 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfa496824b890606 with lease ID 0xd2e0d61a952055e: Processing first storage report for DS-946fd42c-b957-4506-b9df-9ab9dc37f3ff from datanode DatanodeRegistration(127.0.0.1:40217, datanodeUuid=80931abb-de8e-4067-9546-08521da12df6, infoPort=39017, infoSecurePort=0, ipcPort=37365, storageInfo=lv=-57;cid=testClusterID;nsid=1578684384;c=1734110465550)
2024-12-13T17:21:07,390 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfa496824b890606 with lease ID 0xd2e0d61a952055e: from storage DS-946fd42c-b957-4506-b9df-9ab9dc37f3ff node DatanodeRegistration(127.0.0.1:40217, datanodeUuid=80931abb-de8e-4067-9546-08521da12df6, infoPort=39017, infoSecurePort=0, ipcPort=37365, storageInfo=lv=-57;cid=testClusterID;nsid=1578684384;c=1734110465550), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-13T17:21:07,390 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfa496824b890606 with lease ID 0xd2e0d61a952055e: Processing first storage report for DS-b7f0f49d-ed25-4c4d-bffa-d8f0b4cb0b01 from datanode DatanodeRegistration(127.0.0.1:40217, datanodeUuid=80931abb-de8e-4067-9546-08521da12df6, infoPort=39017, infoSecurePort=0, ipcPort=37365, storageInfo=lv=-57;cid=testClusterID;nsid=1578684384;c=1734110465550)
2024-12-13T17:21:07,390 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfa496824b890606 with lease ID 0xd2e0d61a952055e: from storage DS-b7f0f49d-ed25-4c4d-bffa-d8f0b4cb0b01 node DatanodeRegistration(127.0.0.1:40217, datanodeUuid=80931abb-de8e-4067-9546-08521da12df6, infoPort=39017, infoSecurePort=0, ipcPort=37365, storageInfo=lv=-57;cid=testClusterID;nsid=1578684384;c=1734110465550), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-13T17:21:07,440 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3
2024-12-13T17:21:07,443 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/cluster_b3d8cb9d-656a-e795-527f-cec2f0ae7763/zookeeper_0, clientPort=51199, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/cluster_b3d8cb9d-656a-e795-527f-cec2f0ae7763/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/cluster_b3d8cb9d-656a-e795-527f-cec2f0ae7763/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-13T17:21:07,444 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=51199
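The embedded ensemble here is HBase's MiniZooKeeperCluster, which picks a free client port (51199 above) and verifies it with a 'stat' probe. A minimal sketch of running one standalone, assuming HBase's test classes are on the classpath; the data directory is illustrative:

    import java.io.File;
    import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;

    public class MiniZkExample {
        public static void main(String[] args) throws Exception {
            MiniZooKeeperCluster zkCluster = new MiniZooKeeperCluster();
            // startup() returns the chosen client port, like 51199 in the log.
            int clientPort = zkCluster.startup(new File("/tmp/mini-zk"));
            System.out.println("ZooKeeper ensemble at 127.0.0.1:" + clientPort);
            zkCluster.shutdown();
        }
    }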
2024-12-13T17:21:07,444 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-13T17:21:07,446 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-13T17:21:07,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40217 is added to blk_1073741825_1001 (size=7)
2024-12-13T17:21:07,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36517 is added to blk_1073741825_1001 (size=7)
2024-12-13T17:21:07,457 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16 with version=8
2024-12-13T17:21:07,457 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/hbase-staging
2024-12-13T17:21:07,459 INFO [Time-limited test {}] client.ConnectionUtils(129): master/bcc0a2319104:0 server-side Connection retries=45
2024-12-13T17:21:07,459 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-13T17:21:07,459 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-13T17:21:07,459 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-13T17:21:07,459 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-13T17:21:07,459 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-13T17:21:07,459 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-13T17:21:07,460 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-13T17:21:07,460 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:33541
2024-12-13T17:21:07,461 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-13T17:21:07,462 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-13T17:21:07,464 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:33541 connecting to ZooKeeper ensemble=127.0.0.1:51199
2024-12-13T17:21:07,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:335410x0, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-13T17:21:07,549 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33541-0x1002068f1f20000 connected
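RecoverableZooKeeper is a retrying wrapper around the stock ZooKeeper client; the type=None, state=SyncConnected, path=null event above is the raw client signalling that the session is established. A minimal sketch of connecting to the logged ensemble and registering a watch; note that exists() can set a watch on a znode that does not exist yet, which is what the later "Set watcher on znode that does not yet exist" entries rely on:

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZkWatchExample {
        public static void main(String[] args) throws Exception {
            CountDownLatch connected = new CountDownLatch(1);
            ZooKeeper zk = new ZooKeeper("127.0.0.1:51199", 30000, (WatchedEvent event) -> {
                // First event is type=None, state=SyncConnected, path=null, as in the log.
                if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                    connected.countDown();
                }
            });
            connected.await();
            // Registers a watch whether or not /hbase/master exists yet; the session
            // watcher fires with NodeCreated once someone creates it.
            zk.exists("/hbase/master", true);
            zk.close();
        }
    }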
2024-12-13T17:21:07,688 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:21:07,729 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:21:07,861 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:21:07,928 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-13T17:21:07,929 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-13T17:21:07,929 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-13T17:21:07,931 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33541
2024-12-13T17:21:07,931 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33541
2024-12-13T17:21:07,931 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33541
2024-12-13T17:21:07,932 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33541
2024-12-13T17:21:07,932 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33541
2024-12-13T17:21:07,935 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16, hbase.cluster.distributed=false
2024-12-13T17:21:07,951 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/bcc0a2319104:0 server-side Connection retries=45
2024-12-13T17:21:07,951 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-13T17:21:07,951 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-13T17:21:07,951 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-13T17:21:07,951 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-13T17:21:07,951 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-13T17:21:07,951 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-13T17:21:07,952 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-13T17:21:07,952 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:44309
2024-12-13T17:21:07,953 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
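Each RpcExecutor line above describes a call queue backed by a bounded LinkedBlockingQueue (maxQueueLength=30) drained by a fixed number of handler threads. This is not HBase's RpcExecutor, just the same queue/handler shape expressed in plain java.util.concurrent:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class CallQueueExample {
        public static void main(String[] args) {
            // One call queue of capacity 30, drained by 3 handler threads,
            // mirroring "numCallQueues=1, maxQueueLength=30, handlerCount=3".
            ThreadPoolExecutor handlers = new ThreadPoolExecutor(
                3, 3, 0L, TimeUnit.MILLISECONDS,
                new LinkedBlockingQueue<>(30));
            handlers.execute(() -> System.out.println("handled call"));
            handlers.shutdown();
        }
    }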
2024-12-13T17:21:07,953 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-13T17:21:07,954 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-13T17:21:07,956 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-13T17:21:07,958 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:44309 connecting to ZooKeeper ensemble=127.0.0.1:51199
2024-12-13T17:21:08,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:443090x0, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-13T17:21:08,008 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44309-0x1002068f1f20001 connected
2024-12-13T17:21:08,008 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44309-0x1002068f1f20001, quorum=127.0.0.1:51199, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-13T17:21:08,010 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44309-0x1002068f1f20001, quorum=127.0.0.1:51199, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-13T17:21:08,011 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44309-0x1002068f1f20001, quorum=127.0.0.1:51199, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-13T17:21:08,012 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44309
2024-12-13T17:21:08,012 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44309
2024-12-13T17:21:08,013 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44309
2024-12-13T17:21:08,014 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44309
2024-12-13T17:21:08,014 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44309
2024-12-13T17:21:08,016 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/bcc0a2319104,33541,1734110467458
2024-12-13T17:21:08,030 DEBUG [M:0;bcc0a2319104:33541 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;bcc0a2319104:33541
2024-12-13T17:21:08,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44309-0x1002068f1f20001, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-13T17:21:08,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
[master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/bcc0a2319104,33541,1734110467458 2024-12-13T17:21:08,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44309-0x1002068f1f20001, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-13T17:21:08,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-13T17:21:08,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44309-0x1002068f1f20001, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:21:08,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:21:08,097 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-13T17:21:08,098 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/bcc0a2319104,33541,1734110467458 from backup master directory 2024-12-13T17:21:08,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/bcc0a2319104,33541,1734110467458 2024-12-13T17:21:08,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44309-0x1002068f1f20001, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-13T17:21:08,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-13T17:21:08,144 WARN [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
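[Editor's note] The becomeActiveMaster lines above are HBase's ZooKeeper-based election: the master first parks an ephemeral znode under /hbase/backup-masters, then races to create /hbase/master, and on winning deletes its backup entry (the "Deleting ZNode ... from backup master directory" step). A hedged sketch of that sequence, assuming the parent znodes already exist; this is illustrative, not ActiveMasterManager's actual code:

import org.apache.zookeeper.*;

// Sketch of the election step visible in the log. Ephemeral znodes vanish
// when the owning session dies, so a crashed master releases both its
// backup entry and, if it held it, the active-master claim.
public class BecomeActiveMaster {
    static final String SERVER = "bcc0a2319104,33541,1734110467458";

    public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:51199", 30_000, e -> {});
        String backup = "/hbase/backup-masters/" + SERVER;
        zk.create(backup, SERVER.getBytes(),
                  ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        try {
            // Race to claim the active-master znode.
            zk.create("/hbase/master", SERVER.getBytes(),
                      ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
            zk.delete(backup, -1);   // we won; leave the backup-masters list
            System.out.println("registered as active master " + SERVER);
        } catch (KeeperException.NodeExistsException lost) {
            System.out.println("another master is active; staying backup");
        }
        zk.close();
    }
}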
2024-12-13T17:21:08,144 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-13T17:21:08,144 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=bcc0a2319104,33541,1734110467458 2024-12-13T17:21:08,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36517 is added to blk_1073741826_1002 (size=42) 2024-12-13T17:21:08,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40217 is added to blk_1073741826_1002 (size=42) 2024-12-13T17:21:08,155 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/hbase.id with ID: 9dcd6948-0197-4e6a-8abd-925dd78b8e44 2024-12-13T17:21:08,166 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T17:21:08,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:21:08,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44309-0x1002068f1f20001, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:21:08,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36517 is added to blk_1073741827_1003 (size=196) 2024-12-13T17:21:08,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40217 is added to blk_1073741827_1003 (size=196) 2024-12-13T17:21:08,225 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '65536 B (64KB)'} 2024-12-13T17:21:08,226 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-13T17:21:08,226 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-13T17:21:08,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40217 is added to blk_1073741828_1004 (size=1189) 2024-12-13T17:21:08,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36517 is added to blk_1073741828_1004 (size=1189) 2024-12-13T17:21:08,235 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/MasterData/data/master/store 2024-12-13T17:21:08,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36517 is added to blk_1073741829_1005 (size=34) 2024-12-13T17:21:08,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40217 is added to blk_1073741829_1005 (size=34) 2024-12-13T17:21:08,248 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T17:21:08,248 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-13T17:21:08,248 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
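[Editor's note] The master:store descriptor printed above (and repeated in the HRegion(7124) line below) is an ordinary table schema dump. For readers who want the printed attributes in API form, here is a hedged sketch using the public HBase 2.x builder classes; the real region is created internally by MasterRegion, so this is a mapping exercise, not how the code above runs, and only two of the four families are spelled out:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Re-expressing the dumped master:store attributes with builder calls.
public class MasterStoreSchema {
    public static void main(String[] args) {
        TableDescriptor store = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master:store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                    // VERSIONS => '3'
                .setInMemory(true)                                    // IN_MEMORY => 'true'
                .setBlocksize(8192)                                   // BLOCKSIZE => 8 KB
                .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("proc"))      // proc (and rs, state) keep
                .setBloomFilterType(BloomType.ROW)      // the plainer defaults shown
                .build())
            .build();
        System.out.println(store);
    }
}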
2024-12-13T17:21:08,248 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-13T17:21:08,248 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-13T17:21:08,248 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-13T17:21:08,248 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-13T17:21:08,248 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-13T17:21:08,249 WARN [master/bcc0a2319104:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/MasterData/data/master/store/.initializing 2024-12-13T17:21:08,249 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/MasterData/WALs/bcc0a2319104,33541,1734110467458 2024-12-13T17:21:08,252 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bcc0a2319104%2C33541%2C1734110467458, suffix=, logDir=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/MasterData/WALs/bcc0a2319104,33541,1734110467458, archiveDir=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/MasterData/oldWALs, maxLogs=10 2024-12-13T17:21:08,252 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C33541%2C1734110467458.1734110468252 2024-12-13T17:21:08,260 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/MasterData/WALs/bcc0a2319104,33541,1734110467458/bcc0a2319104%2C33541%2C1734110467458.1734110468252 2024-12-13T17:21:08,260 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43705:43705),(127.0.0.1/127.0.0.1:39017:39017)] 2024-12-13T17:21:08,260 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-13T17:21:08,260 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T17:21:08,260 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-13T17:21:08,260 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-13T17:21:08,261 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-13T17:21:08,263 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-13T17:21:08,263 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T17:21:08,263 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T17:21:08,264 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-13T17:21:08,265 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-13T17:21:08,265 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T17:21:08,265 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T17:21:08,265 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-13T17:21:08,266 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-13T17:21:08,266 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T17:21:08,267 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T17:21:08,267 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-13T17:21:08,268 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-13T17:21:08,268 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T17:21:08,268 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T17:21:08,269 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-13T17:21:08,269 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-13T17:21:08,271 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-13T17:21:08,272 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-13T17:21:08,274 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T17:21:08,274 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=724183, jitterRate=-0.07915399968624115}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-13T17:21:08,275 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-13T17:21:08,275 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-13T17:21:08,278 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76e485b3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T17:21:08,279 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-13T17:21:08,279 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-13T17:21:08,279 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-13T17:21:08,279 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-13T17:21:08,280 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec 2024-12-13T17:21:08,280 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec 2024-12-13T17:21:08,280 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-13T17:21:08,282 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
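[Editor's note] The ProcedureExecutor(633) line states its own sizing rule in the parenthetical: the default core worker count is the larger of cpus/4 and 16, apparently pinned to 5 here by the store's "number threads=5" setting, with a burst ceiling of 50. The arithmetic, spelled out (variable names are illustrative):

// Worked form of "Starting 5 core workers (bigger of cpus/4 or 16)
// with max (burst) worker count=50".
public class ProcWorkerSizing {
    public static void main(String[] args) {
        int cpus = Runtime.getRuntime().availableProcessors();
        int defaultCoreWorkers = Math.max(cpus / 4, 16); // "bigger of cpus/4 or 16"
        int configuredWorkers = 5;                       // "number threads=5" in the log
        int burstMax = 50;                               // "max (burst) worker count=50"
        System.out.printf("default=%d, configured=%d, burst max=%d%n",
                defaultCoreWorkers, configuredWorkers, burstMax);
    }
}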
2024-12-13T17:21:08,283 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-13T17:21:08,338 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-13T17:21:08,339 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-13T17:21:08,340 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-13T17:21:08,363 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-13T17:21:08,364 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-13T17:21:08,366 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-13T17:21:08,411 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-13T17:21:08,412 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-13T17:21:08,470 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-13T17:21:08,472 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-13T17:21:08,514 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-13T17:21:08,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-13T17:21:08,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44309-0x1002068f1f20001, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-13T17:21:08,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44309-0x1002068f1f20001, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:21:08,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase
2024-12-13T17:21:08,562 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=bcc0a2319104,33541,1734110467458, sessionid=0x1002068f1f20000, setting cluster-up flag (Was=false)
2024-12-13T17:21:08,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44309-0x1002068f1f20001, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:21:08,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:21:08,670 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-12-13T17:21:08,671 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bcc0a2319104,33541,1734110467458
2024-12-13T17:21:08,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:21:08,730 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:21:08,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44309-0x1002068f1f20001, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:21:08,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:21:08,796 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-12-13T17:21:08,797 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bcc0a2319104,33541,1734110467458
2024-12-13T17:21:08,800 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta
2024-12-13T17:21:08,800 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2
2024-12-13T17:21:08,800 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
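[Editor's note] The two RecoverLeaseFSUtils(258) warnings above look odd ("InvocationTargetException: null") until you notice the Method.invoke frame: isFileClosed is called reflectively, so the real IOException ("Filesystem closed", apparently from WAL files of an earlier, already shut-down mini-cluster; note the different NameNode port 34581) surfaces only as the cause. A self-contained reproduction of the wrapping, using a stand-in class rather than a real DFSClient:

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Any exception thrown by a reflectively invoked method arrives wrapped in
// InvocationTargetException, whose own message is null; the real failure is
// only reachable via getCause(). That is exactly the shape of the log above.
public class ReflectiveCallWrapping {
    static class Fs {
        public boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed");   // what DFSClient threw
        }
    }

    public static void main(String[] args) throws Exception {
        Method m = Fs.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(new Fs(), "/some/wal");
        } catch (InvocationTargetException e) {
            System.out.println("wrapper message: " + e.getMessage()); // null
            System.out.println("real cause: " + e.getCause());       // the IOException
        }
    }
}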
2024-12-13T17:21:08,800 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: bcc0a2319104,33541,1734110467458 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-13T17:21:08,800 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/bcc0a2319104:0, corePoolSize=5, maxPoolSize=5 2024-12-13T17:21:08,800 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/bcc0a2319104:0, corePoolSize=5, maxPoolSize=5 2024-12-13T17:21:08,800 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/bcc0a2319104:0, corePoolSize=5, maxPoolSize=5 2024-12-13T17:21:08,800 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/bcc0a2319104:0, corePoolSize=5, maxPoolSize=5 2024-12-13T17:21:08,801 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/bcc0a2319104:0, corePoolSize=10, maxPoolSize=10 2024-12-13T17:21:08,801 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1 2024-12-13T17:21:08,801 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/bcc0a2319104:0, corePoolSize=2, maxPoolSize=2 2024-12-13T17:21:08,801 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1 2024-12-13T17:21:08,802 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1734110498802 2024-12-13T17:21:08,802 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-13T17:21:08,802 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-13T17:21:08,802 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-13T17:21:08,802 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-13T17:21:08,802 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-13T17:21:08,802 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-13T17:21:08,802 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): 
BOOTSTRAP: creating hbase:meta region 2024-12-13T17:21:08,802 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-13T17:21:08,803 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-13T17:21:08,803 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-13T17:21:08,803 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-13T17:21:08,803 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-13T17:21:08,803 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T17:21:08,803 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-13T17:21:08,804 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-13T17:21:08,804 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-13T17:21:08,805 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.large.0-1734110468804,5,FailOnTimeoutGroup] 2024-12-13T17:21:08,805 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.small.0-1734110468805,5,FailOnTimeoutGroup] 2024-12-13T17:21:08,805 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
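[Editor's note] Each "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" line above is a named periodic task registered with the shared ChoreService. Functionally it amounts to the sketch below (plain java.util.concurrent; HBase's ChoreService adds missed-run bookkeeping on top, and the cleaner name here is illustrative):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// A chore is just a named task on a shared scheduler with a fixed period.
public class ChoreSketch {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService chorePool = Executors.newScheduledThreadPool(1);
        // period=600000 ms matches the LogsCleaner/HFileCleaner lines above
        chorePool.scheduleAtFixedRate(
            () -> System.out.println("LogsCleaner: sweep oldWALs for expired files"),
            0, 600_000, TimeUnit.MILLISECONDS);
        TimeUnit.SECONDS.sleep(1);   // let the initial run fire in this demo
        chorePool.shutdown();
    }
}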
2024-12-13T17:21:08,805 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-13T17:21:08,805 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-13T17:21:08,805 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-13T17:21:08,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36517 is added to blk_1073741831_1007 (size=1039) 2024-12-13T17:21:08,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40217 is added to blk_1073741831_1007 (size=1039) 2024-12-13T17:21:08,816 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-13T17:21:08,816 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16 2024-12-13T17:21:08,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40217 is added to blk_1073741832_1008 (size=32) 2024-12-13T17:21:08,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36517 is added to blk_1073741832_1008 (size=32) 2024-12-13T17:21:08,830 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T17:21:08,833 DEBUG [RS:0;bcc0a2319104:44309 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;bcc0a2319104:44309 2024-12-13T17:21:08,834 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-13T17:21:08,834 INFO [RS:0;bcc0a2319104:44309 {}] regionserver.HRegionServer(1008): ClusterId : 9dcd6948-0197-4e6a-8abd-925dd78b8e44 2024-12-13T17:21:08,834 DEBUG [RS:0;bcc0a2319104:44309 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-13T17:21:08,837 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-13T17:21:08,837 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T17:21:08,837 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T17:21:08,838 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-13T17:21:08,839 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-13T17:21:08,839 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T17:21:08,840 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T17:21:08,840 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-13T17:21:08,842 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-13T17:21:08,842 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T17:21:08,842 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T17:21:08,843 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/hbase/meta/1588230740 2024-12-13T17:21:08,844 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/hbase/meta/1588230740 2024-12-13T17:21:08,844 DEBUG [RS:0;bcc0a2319104:44309 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-13T17:21:08,844 DEBUG [RS:0;bcc0a2319104:44309 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-13T17:21:08,845 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-13T17:21:08,846 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-13T17:21:08,848 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T17:21:08,849 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=731568, jitterRate=-0.06976385414600372}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-13T17:21:08,849 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-13T17:21:08,849 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-13T17:21:08,849 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-13T17:21:08,849 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-13T17:21:08,849 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-13T17:21:08,849 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-13T17:21:08,851 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-13T17:21:08,851 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-13T17:21:08,852 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-13T17:21:08,852 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-13T17:21:08,853 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-13T17:21:08,854 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-13T17:21:08,854 DEBUG [RS:0;bcc0a2319104:44309 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-13T17:21:08,855 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-13T17:21:08,855 DEBUG [RS:0;bcc0a2319104:44309 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d48394a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T17:21:08,855 DEBUG [RS:0;bcc0a2319104:44309 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e3a388f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, 
fallbackAllowed=true, bind address=bcc0a2319104/172.17.0.2:0
2024-12-13T17:21:08,855 INFO [RS:0;bcc0a2319104:44309 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled
2024-12-13T17:21:08,855 INFO [RS:0;bcc0a2319104:44309 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled
2024-12-13T17:21:08,855 DEBUG [RS:0;bcc0a2319104:44309 {}] regionserver.HRegionServer(1090): About to register with Master.
2024-12-13T17:21:08,856 INFO [RS:0;bcc0a2319104:44309 {}] regionserver.HRegionServer(3073): reportForDuty to master=bcc0a2319104,33541,1734110467458 with isa=bcc0a2319104/172.17.0.2:44309, startcode=1734110467950
2024-12-13T17:21:08,856 DEBUG [RS:0;bcc0a2319104:44309 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-13T17:21:08,861 INFO [RS-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49987, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService
2024-12-13T17:21:08,861 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33541 {}] master.ServerManager(332): Checking decommissioned status of RegionServer bcc0a2319104,44309,1734110467950
2024-12-13T17:21:08,862 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33541 {}] master.ServerManager(486): Registering regionserver=bcc0a2319104,44309,1734110467950
2024-12-13T17:21:08,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:21:08,863 DEBUG [RS:0;bcc0a2319104:44309 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16
2024-12-13T17:21:08,863 DEBUG [RS:0;bcc0a2319104:44309 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:41007
2024-12-13T17:21:08,863 DEBUG [RS:0;bcc0a2319104:44309 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1
2024-12-13T17:21:08,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-13T17:21:08,871 DEBUG [RS:0;bcc0a2319104:44309 {}] zookeeper.ZKUtil(111): regionserver:44309-0x1002068f1f20001, quorum=127.0.0.1:51199, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/bcc0a2319104,44309,1734110467950
2024-12-13T17:21:08,871 WARN [RS:0;bcc0a2319104:44309 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-13T17:21:08,871 INFO [RS:0;bcc0a2319104:44309 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-13T17:21:08,871 DEBUG [RS:0;bcc0a2319104:44309 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/WALs/bcc0a2319104,44309,1734110467950
2024-12-13T17:21:08,872 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [bcc0a2319104,44309,1734110467950]
2024-12-13T17:21:08,875 DEBUG [RS:0;bcc0a2319104:44309 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds
2024-12-13T17:21:08,875 INFO [RS:0;bcc0a2319104:44309 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-12-13T17:21:08,879 INFO [RS:0;bcc0a2319104:44309 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-12-13T17:21:08,880 INFO [RS:0;bcc0a2319104:44309 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-13T17:21:08,880 INFO [RS:0;bcc0a2319104:44309 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-13T17:21:08,880 INFO [RS:0;bcc0a2319104:44309 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S
2024-12-13T17:21:08,881 INFO [RS:0;bcc0a2319104:44309 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-12-13T17:21:08,882 DEBUG [RS:0;bcc0a2319104:44309 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1 2024-12-13T17:21:08,882 DEBUG [RS:0;bcc0a2319104:44309 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1 2024-12-13T17:21:08,882 DEBUG [RS:0;bcc0a2319104:44309 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1 2024-12-13T17:21:08,882 DEBUG [RS:0;bcc0a2319104:44309 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1 2024-12-13T17:21:08,882 DEBUG [RS:0;bcc0a2319104:44309 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1 2024-12-13T17:21:08,882 DEBUG [RS:0;bcc0a2319104:44309 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/bcc0a2319104:0, corePoolSize=2, maxPoolSize=2 2024-12-13T17:21:08,882 DEBUG [RS:0;bcc0a2319104:44309 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1 2024-12-13T17:21:08,882 DEBUG [RS:0;bcc0a2319104:44309 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1 2024-12-13T17:21:08,882 DEBUG [RS:0;bcc0a2319104:44309 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1 2024-12-13T17:21:08,882 DEBUG [RS:0;bcc0a2319104:44309 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1 2024-12-13T17:21:08,882 DEBUG [RS:0;bcc0a2319104:44309 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1 2024-12-13T17:21:08,882 DEBUG [RS:0;bcc0a2319104:44309 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/bcc0a2319104:0, corePoolSize=3, maxPoolSize=3 2024-12-13T17:21:08,882 DEBUG [RS:0;bcc0a2319104:44309 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0, corePoolSize=3, maxPoolSize=3 2024-12-13T17:21:08,883 INFO [RS:0;bcc0a2319104:44309 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-13T17:21:08,883 INFO [RS:0;bcc0a2319104:44309 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-13T17:21:08,883 INFO [RS:0;bcc0a2319104:44309 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-13T17:21:08,883 INFO [RS:0;bcc0a2319104:44309 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-13T17:21:08,883 INFO [RS:0;bcc0a2319104:44309 {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,44309,1734110467950-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-13T17:21:08,902 INFO [RS:0;bcc0a2319104:44309 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-13T17:21:08,902 INFO [RS:0;bcc0a2319104:44309 {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,44309,1734110467950-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-13T17:21:08,921 INFO [RS:0;bcc0a2319104:44309 {}] regionserver.Replication(204): bcc0a2319104,44309,1734110467950 started 2024-12-13T17:21:08,921 INFO [RS:0;bcc0a2319104:44309 {}] regionserver.HRegionServer(1767): Serving as bcc0a2319104,44309,1734110467950, RpcServer on bcc0a2319104/172.17.0.2:44309, sessionid=0x1002068f1f20001 2024-12-13T17:21:08,921 DEBUG [RS:0;bcc0a2319104:44309 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-13T17:21:08,921 DEBUG [RS:0;bcc0a2319104:44309 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager bcc0a2319104,44309,1734110467950 2024-12-13T17:21:08,921 DEBUG [RS:0;bcc0a2319104:44309 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bcc0a2319104,44309,1734110467950' 2024-12-13T17:21:08,921 DEBUG [RS:0;bcc0a2319104:44309 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-13T17:21:08,922 DEBUG [RS:0;bcc0a2319104:44309 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-13T17:21:08,923 DEBUG [RS:0;bcc0a2319104:44309 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-13T17:21:08,923 DEBUG [RS:0;bcc0a2319104:44309 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-13T17:21:08,923 DEBUG [RS:0;bcc0a2319104:44309 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager bcc0a2319104,44309,1734110467950 2024-12-13T17:21:08,923 DEBUG [RS:0;bcc0a2319104:44309 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bcc0a2319104,44309,1734110467950' 2024-12-13T17:21:08,923 DEBUG [RS:0;bcc0a2319104:44309 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-13T17:21:08,923 DEBUG [RS:0;bcc0a2319104:44309 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-13T17:21:08,923 DEBUG [RS:0;bcc0a2319104:44309 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-13T17:21:08,923 INFO [RS:0;bcc0a2319104:44309 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-13T17:21:08,923 INFO [RS:0;bcc0a2319104:44309 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-13T17:21:09,005 WARN [bcc0a2319104:33541 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
2024-12-13T17:21:09,026 INFO [RS:0;bcc0a2319104:44309 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bcc0a2319104%2C44309%2C1734110467950, suffix=, logDir=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/WALs/bcc0a2319104,44309,1734110467950, archiveDir=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/oldWALs, maxLogs=32 2024-12-13T17:21:09,026 INFO [RS:0;bcc0a2319104:44309 {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C44309%2C1734110467950.1734110469026 2024-12-13T17:21:09,042 INFO [RS:0;bcc0a2319104:44309 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/WALs/bcc0a2319104,44309,1734110467950/bcc0a2319104%2C44309%2C1734110467950.1734110469026 2024-12-13T17:21:09,042 DEBUG [RS:0;bcc0a2319104:44309 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39017:39017),(127.0.0.1/127.0.0.1:43705:43705)] 2024-12-13T17:21:09,255 DEBUG [bcc0a2319104:33541 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-13T17:21:09,256 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=bcc0a2319104,44309,1734110467950 2024-12-13T17:21:09,257 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bcc0a2319104,44309,1734110467950, state=OPENING 2024-12-13T17:21:09,262 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-13T17:21:09,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44309-0x1002068f1f20001, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:21:09,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:21:09,280 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=bcc0a2319104,44309,1734110467950}] 2024-12-13T17:21:09,280 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-13T17:21:09,280 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-13T17:21:09,433 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bcc0a2319104,44309,1734110467950 2024-12-13T17:21:09,433 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-13T17:21:09,435 INFO [RS-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55564, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-13T17:21:09,443 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-13T17:21:09,443 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating 
WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-13T17:21:09,446 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bcc0a2319104%2C44309%2C1734110467950.meta, suffix=.meta, logDir=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/WALs/bcc0a2319104,44309,1734110467950, archiveDir=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/oldWALs, maxLogs=32 2024-12-13T17:21:09,456 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C44309%2C1734110467950.meta.1734110469456.meta 2024-12-13T17:21:09,480 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/WALs/bcc0a2319104,44309,1734110467950/bcc0a2319104%2C44309%2C1734110467950.meta.1734110469456.meta 2024-12-13T17:21:09,480 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39017:39017),(127.0.0.1/127.0.0.1:43705:43705)] 2024-12-13T17:21:09,480 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-13T17:21:09,481 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-13T17:21:09,481 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-13T17:21:09,481 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-13T17:21:09,481 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-13T17:21:09,481 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T17:21:09,481 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-13T17:21:09,481 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-13T17:21:09,482 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-13T17:21:09,483 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-13T17:21:09,483 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T17:21:09,483 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T17:21:09,484 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-13T17:21:09,484 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-13T17:21:09,484 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T17:21:09,485 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T17:21:09,485 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-13T17:21:09,485 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-13T17:21:09,485 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T17:21:09,486 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-13T17:21:09,487 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/hbase/meta/1588230740 2024-12-13T17:21:09,488 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/hbase/meta/1588230740 2024-12-13T17:21:09,489 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-13T17:21:09,490 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-13T17:21:09,491 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=760989, jitterRate=-0.03235328197479248}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-13T17:21:09,491 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-13T17:21:09,492 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1734110469433 2024-12-13T17:21:09,493 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-13T17:21:09,493 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-13T17:21:09,494 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=bcc0a2319104,44309,1734110467950 2024-12-13T17:21:09,494 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bcc0a2319104,44309,1734110467950, state=OPEN 2024-12-13T17:21:09,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44309-0x1002068f1f20001, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-13T17:21:09,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-13T17:21:09,523 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-13T17:21:09,523 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-13T17:21:09,525 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-13T17:21:09,525 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=bcc0a2319104,44309,1734110467950 in 243 msec 2024-12-13T17:21:09,528 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-13T17:21:09,528 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 672 msec 2024-12-13T17:21:09,530 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 729 msec 2024-12-13T17:21:09,530 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers 
to report in: status=status unset, state=RUNNING, startTime=1734110469530, completionTime=-1 2024-12-13T17:21:09,530 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-13T17:21:09,530 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-13T17:21:09,531 DEBUG [hconnection-0x6e9d99ac-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T17:21:09,532 INFO [RS-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55576, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T17:21:09,533 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-13T17:21:09,533 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1734110529533 2024-12-13T17:21:09,533 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1734110589533 2024-12-13T17:21:09,533 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 2 msec 2024-12-13T17:21:09,554 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,33541,1734110467458-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-13T17:21:09,554 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,33541,1734110467458-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-13T17:21:09,554 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,33541,1734110467458-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-13T17:21:09,554 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-bcc0a2319104:33541, period=300000, unit=MILLISECONDS is enabled. 2024-12-13T17:21:09,554 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-13T17:21:09,554 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-12-13T17:21:09,554 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-13T17:21:09,556 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-13T17:21:09,556 DEBUG [master/bcc0a2319104:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-13T17:21:09,557 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-13T17:21:09,557 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T17:21:09,558 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-13T17:21:09,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36517 is added to blk_1073741835_1011 (size=358) 2024-12-13T17:21:09,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40217 is added to blk_1073741835_1011 (size=358) 2024-12-13T17:21:09,567 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => aa1510980ff757e5dd155751d8c08d6c, NAME => 'hbase:namespace,,1734110469554.aa1510980ff757e5dd155751d8c08d6c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16 2024-12-13T17:21:09,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40217 is added to blk_1073741836_1012 (size=42) 2024-12-13T17:21:09,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36517 is added to blk_1073741836_1012 (size=42) 2024-12-13T17:21:09,590 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734110469554.aa1510980ff757e5dd155751d8c08d6c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T17:21:09,590 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing aa1510980ff757e5dd155751d8c08d6c, disabling compactions & flushes 2024-12-13T17:21:09,591 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region 
hbase:namespace,,1734110469554.aa1510980ff757e5dd155751d8c08d6c. 2024-12-13T17:21:09,591 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734110469554.aa1510980ff757e5dd155751d8c08d6c. 2024-12-13T17:21:09,591 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734110469554.aa1510980ff757e5dd155751d8c08d6c. after waiting 0 ms 2024-12-13T17:21:09,591 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734110469554.aa1510980ff757e5dd155751d8c08d6c. 2024-12-13T17:21:09,591 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1734110469554.aa1510980ff757e5dd155751d8c08d6c. 2024-12-13T17:21:09,591 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for aa1510980ff757e5dd155751d8c08d6c: 2024-12-13T17:21:09,592 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-13T17:21:09,593 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1734110469554.aa1510980ff757e5dd155751d8c08d6c.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1734110469592"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734110469592"}]},"ts":"1734110469592"} 2024-12-13T17:21:09,595 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-13T17:21:09,596 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-13T17:21:09,596 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734110469596"}]},"ts":"1734110469596"} 2024-12-13T17:21:09,599 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-13T17:21:09,621 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=aa1510980ff757e5dd155751d8c08d6c, ASSIGN}] 2024-12-13T17:21:09,622 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=aa1510980ff757e5dd155751d8c08d6c, ASSIGN 2024-12-13T17:21:09,623 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=aa1510980ff757e5dd155751d8c08d6c, ASSIGN; state=OFFLINE, location=bcc0a2319104,44309,1734110467950; forceNewPlan=false, retain=false 2024-12-13T17:21:09,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta 
java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:09,731 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:09,773 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=aa1510980ff757e5dd155751d8c08d6c, regionState=OPENING, regionLocation=bcc0a2319104,44309,1734110467950 2024-12-13T17:21:09,775 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure aa1510980ff757e5dd155751d8c08d6c, server=bcc0a2319104,44309,1734110467950}] 2024-12-13T17:21:09,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:21:09,929 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bcc0a2319104,44309,1734110467950 2024-12-13T17:21:09,936 INFO [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1734110469554.aa1510980ff757e5dd155751d8c08d6c. 2024-12-13T17:21:09,936 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => aa1510980ff757e5dd155751d8c08d6c, NAME => 'hbase:namespace,,1734110469554.aa1510980ff757e5dd155751d8c08d6c.', STARTKEY => '', ENDKEY => ''} 2024-12-13T17:21:09,937 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace aa1510980ff757e5dd155751d8c08d6c 2024-12-13T17:21:09,937 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734110469554.aa1510980ff757e5dd155751d8c08d6c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-13T17:21:09,937 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for aa1510980ff757e5dd155751d8c08d6c 2024-12-13T17:21:09,937 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for aa1510980ff757e5dd155751d8c08d6c 2024-12-13T17:21:09,939 INFO [StoreOpener-aa1510980ff757e5dd155751d8c08d6c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region aa1510980ff757e5dd155751d8c08d6c 2024-12-13T17:21:09,940 INFO [StoreOpener-aa1510980ff757e5dd155751d8c08d6c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region aa1510980ff757e5dd155751d8c08d6c columnFamilyName info 2024-12-13T17:21:09,940 DEBUG [StoreOpener-aa1510980ff757e5dd155751d8c08d6c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-13T17:21:09,941 INFO [StoreOpener-aa1510980ff757e5dd155751d8c08d6c-1 {}] regionserver.HStore(327): Store=aa1510980ff757e5dd155751d8c08d6c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-13T17:21:09,942 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/hbase/namespace/aa1510980ff757e5dd155751d8c08d6c 2024-12-13T17:21:09,942 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/hbase/namespace/aa1510980ff757e5dd155751d8c08d6c 2024-12-13T17:21:09,944 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for aa1510980ff757e5dd155751d8c08d6c 2024-12-13T17:21:09,946 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/hbase/namespace/aa1510980ff757e5dd155751d8c08d6c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-13T17:21:09,946 INFO [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened aa1510980ff757e5dd155751d8c08d6c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=781240, jitterRate=-0.006603077054023743}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-13T17:21:09,946 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for aa1510980ff757e5dd155751d8c08d6c: 2024-12-13T17:21:09,947 INFO [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1734110469554.aa1510980ff757e5dd155751d8c08d6c., pid=6, masterSystemTime=1734110469929 2024-12-13T17:21:09,949 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1734110469554.aa1510980ff757e5dd155751d8c08d6c. 2024-12-13T17:21:09,949 INFO [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1734110469554.aa1510980ff757e5dd155751d8c08d6c. 
2024-12-13T17:21:09,949 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=aa1510980ff757e5dd155751d8c08d6c, regionState=OPEN, openSeqNum=2, regionLocation=bcc0a2319104,44309,1734110467950 2024-12-13T17:21:09,952 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-13T17:21:09,953 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure aa1510980ff757e5dd155751d8c08d6c, server=bcc0a2319104,44309,1734110467950 in 176 msec 2024-12-13T17:21:09,955 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-13T17:21:09,955 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=aa1510980ff757e5dd155751d8c08d6c, ASSIGN in 332 msec 2024-12-13T17:21:09,955 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T17:21:09,956 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734110469955"}]},"ts":"1734110469955"} 2024-12-13T17:21:09,957 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-13T17:21:09,962 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-13T17:21:09,963 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T17:21:09,965 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 409 msec 2024-12-13T17:21:09,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44309-0x1002068f1f20001, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:21:09,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-13T17:21:09,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:21:09,975 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-13T17:21:09,987 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-13T17:21:09,998 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 23 msec 2024-12-13T17:21:10,007 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-13T17:21:10,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-13T17:21:10,034 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 24 msec 2024-12-13T17:21:10,054 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-13T17:21:10,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-13T17:21:10,087 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 1.943sec 2024-12-13T17:21:10,088 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-13T17:21:10,088 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-13T17:21:10,088 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-13T17:21:10,088 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-13T17:21:10,088 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-13T17:21:10,088 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,33541,1734110467458-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-13T17:21:10,088 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,33541,1734110467458-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-13T17:21:10,090 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-13T17:21:10,090 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-13T17:21:10,090 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,33541,1734110467458-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-13T17:21:10,124 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5e46a98b to 127.0.0.1:51199 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@74f0d8ba 2024-12-13T17:21:10,138 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64e04bcb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-13T17:21:10,139 DEBUG [hconnection-0x208b7e3a-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-13T17:21:10,141 INFO [RS-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59348, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-13T17:21:10,143 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=bcc0a2319104,33541,1734110467458 2024-12-13T17:21:10,144 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T17:21:10,147 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-13T17:21:10,148 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-13T17:21:10,150 INFO [RS-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39428, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-13T17:21:10,151 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-13T17:21:10,152 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-13T17:21:10,152 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-13T17:21:10,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-13T17:21:10,154 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION
2024-12-13T17:21:10,154 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:21:10,155 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 9
2024-12-13T17:21:10,156 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-13T17:21:10,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9
2024-12-13T17:21:10,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40217 is added to blk_1073741837_1013 (size=405)
2024-12-13T17:21:10,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36517 is added to blk_1073741837_1013 (size=405)
2024-12-13T17:21:10,166 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 45d204d19da67c6c7eecef911c799fb1, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16
2024-12-13T17:21:10,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36517 is added to blk_1073741838_1014 (size=88)
2024-12-13T17:21:10,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40217 is added to blk_1073741838_1014 (size=88)
2024-12-13T17:21:10,173 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(894): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:21:10,173 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1681): Closing 45d204d19da67c6c7eecef911c799fb1, disabling compactions & flushes
2024-12-13T17:21:10,173 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1.
2024-12-13T17:21:10,173 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1.
2024-12-13T17:21:10,173 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1. after waiting 0 ms
2024-12-13T17:21:10,173 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1.
2024-12-13T17:21:10,173 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1.
2024-12-13T17:21:10,173 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1635): Region close journal for 45d204d19da67c6c7eecef911c799fb1:
2024-12-13T17:21:10,174 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META
2024-12-13T17:21:10,174 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1734110470174"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734110470174"}]},"ts":"1734110470174"}
2024-12-13T17:21:10,176 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta.
2024-12-13T17:21:10,177 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-13T17:21:10,178 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734110470177"}]},"ts":"1734110470177"}
2024-12-13T17:21:10,179 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta
2024-12-13T17:21:10,196 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=45d204d19da67c6c7eecef911c799fb1, ASSIGN}]
2024-12-13T17:21:10,197 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=45d204d19da67c6c7eecef911c799fb1, ASSIGN
2024-12-13T17:21:10,198 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=45d204d19da67c6c7eecef911c799fb1, ASSIGN; state=OFFLINE, location=bcc0a2319104,44309,1734110467950; forceNewPlan=false, retain=false
2024-12-13T17:21:10,349 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=45d204d19da67c6c7eecef911c799fb1, regionState=OPENING, regionLocation=bcc0a2319104,44309,1734110467950
2024-12-13T17:21:10,351 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 45d204d19da67c6c7eecef911c799fb1, server=bcc0a2319104,44309,1734110467950}]
2024-12-13T17:21:10,503 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bcc0a2319104,44309,1734110467950
2024-12-13T17:21:10,507 INFO [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1.
2024-12-13T17:21:10,507 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 45d204d19da67c6c7eecef911c799fb1, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1.', STARTKEY => '', ENDKEY => ''}
2024-12-13T17:21:10,508 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 45d204d19da67c6c7eecef911c799fb1
2024-12-13T17:21:10,508 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:21:10,508 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 45d204d19da67c6c7eecef911c799fb1
2024-12-13T17:21:10,508 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 45d204d19da67c6c7eecef911c799fb1
2024-12-13T17:21:10,509 INFO [StoreOpener-45d204d19da67c6c7eecef911c799fb1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 45d204d19da67c6c7eecef911c799fb1
2024-12-13T17:21:10,511 INFO [StoreOpener-45d204d19da67c6c7eecef911c799fb1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 45d204d19da67c6c7eecef911c799fb1 columnFamilyName info
2024-12-13T17:21:10,511 DEBUG [StoreOpener-45d204d19da67c6c7eecef911c799fb1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:21:10,511 INFO [StoreOpener-45d204d19da67c6c7eecef911c799fb1-1 {}] regionserver.HStore(327): Store=45d204d19da67c6c7eecef911c799fb1/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-13T17:21:10,512 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1
2024-12-13T17:21:10,512 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1
2024-12-13T17:21:10,514 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 45d204d19da67c6c7eecef911c799fb1
2024-12-13T17:21:10,516 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-13T17:21:10,516 INFO [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 45d204d19da67c6c7eecef911c799fb1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=798404, jitterRate=0.015223890542984009}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-13T17:21:10,517 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 45d204d19da67c6c7eecef911c799fb1:
2024-12-13T17:21:10,518 INFO [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1., pid=11, masterSystemTime=1734110470503
2024-12-13T17:21:10,519 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1.
2024-12-13T17:21:10,519 INFO [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1.
2024-12-13T17:21:10,520 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=45d204d19da67c6c7eecef911c799fb1, regionState=OPEN, openSeqNum=2, regionLocation=bcc0a2319104,44309,1734110467950
2024-12-13T17:21:10,524 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10
2024-12-13T17:21:10,524 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 45d204d19da67c6c7eecef911c799fb1, server=bcc0a2319104,44309,1734110467950 in 171 msec
2024-12-13T17:21:10,526 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9
2024-12-13T17:21:10,526 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=45d204d19da67c6c7eecef911c799fb1, ASSIGN in 328 msec
2024-12-13T17:21:10,527 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-13T17:21:10,527 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734110470527"}]},"ts":"1734110470527"}
2024-12-13T17:21:10,529 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta
2024-12-13T17:21:10,538 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION
2024-12-13T17:21:10,539 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 386 msec
2024-12-13T17:21:10,690 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:21:10,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:21:10,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:21:11,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
2024-12-13T17:21:11,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
2024-12-13T17:21:11,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
2024-12-13T17:21:12,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
2024-12-13T17:21:12,733 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
2024-12-13T17:21:12,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
2024-12-13T17:21:13,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
2024-12-13T17:21:13,734 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
2024-12-13T17:21:13,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
2024-12-13T17:21:14,019 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace
2024-12-13T17:21:14,019 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer
2024-12-13T17:21:14,020 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-13T17:21:14,020 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer
2024-12-13T17:21:14,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
2024-12-13T17:21:14,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
2024-12-13T17:21:14,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
2024-12-13T17:21:14,983 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-13T17:21:14,984 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:14,985 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:14,985 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:14,985 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:15,005 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:15,006 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:15,006 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:15,006 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:15,006 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:15,007 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:15,009 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:15,010 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:15,010 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:15,013 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:21:15,020 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-12-13T17:21:15,020 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace'
2024-12-13T17:21:15,021 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling'
2024-12-13T17:21:15,693 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
2024-12-13T17:21:15,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:15,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-13T17:21:16,694 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:21:16,736 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:21:16,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
2024-12-13T17:21:17,694 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:21:17,737 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:21:17,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
2024-12-13T17:21:18,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:21:18,737 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:21:18,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
2024-12-13T17:21:19,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:21:19,738 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:21:19,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
2024-12-13T17:21:20,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9
2024-12-13T17:21:20,158 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 9 completed
2024-12-13T17:21:20,162 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-13T17:21:20,162 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1.
2024-12-13T17:21:20,172 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush hbase:namespace
2024-12-13T17:21:20,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=hbase:namespace
2024-12-13T17:21:20,177 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=hbase:namespace execute state=FLUSH_TABLE_PREPARE
2024-12-13T17:21:20,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12
2024-12-13T17:21:20,178 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=hbase:namespace execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-13T17:21:20,179 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-13T17:21:20,338 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bcc0a2319104,44309,1734110467950
2024-12-13T17:21:20,340 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44309 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13
2024-12-13T17:21:20,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on hbase:namespace,,1734110469554.aa1510980ff757e5dd155751d8c08d6c.
2024-12-13T17:21:20,340 INFO [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing aa1510980ff757e5dd155751d8c08d6c 1/1 column families, dataSize=78 B heapSize=488 B
2024-12-13T17:21:20,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/hbase/namespace/aa1510980ff757e5dd155751d8c08d6c/.tmp/info/9ab08d6cf1ca45d7b5fa1fc7fb594717 is 45, key is default/info:d/1734110469980/Put/seqid=0
2024-12-13T17:21:20,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40217 is added to blk_1073741839_1015 (size=5037)
2024-12-13T17:21:20,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36517 is added to blk_1073741839_1015 (size=5037)
2024-12-13T17:21:20,361 INFO [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/hbase/namespace/aa1510980ff757e5dd155751d8c08d6c/.tmp/info/9ab08d6cf1ca45d7b5fa1fc7fb594717
2024-12-13T17:21:20,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/hbase/namespace/aa1510980ff757e5dd155751d8c08d6c/.tmp/info/9ab08d6cf1ca45d7b5fa1fc7fb594717 as hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/hbase/namespace/aa1510980ff757e5dd155751d8c08d6c/info/9ab08d6cf1ca45d7b5fa1fc7fb594717
2024-12-13T17:21:20,372 INFO [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/hbase/namespace/aa1510980ff757e5dd155751d8c08d6c/info/9ab08d6cf1ca45d7b5fa1fc7fb594717, entries=2, sequenceid=6, filesize=4.9 K
2024-12-13T17:21:20,373 INFO [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for aa1510980ff757e5dd155751d8c08d6c in 33ms, sequenceid=6, compaction requested=false
2024-12-13T17:21:20,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for aa1510980ff757e5dd155751d8c08d6c:
2024-12-13T17:21:20,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on hbase:namespace,,1734110469554.aa1510980ff757e5dd155751d8c08d6c.
2024-12-13T17:21:20,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13
2024-12-13T17:21:20,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] master.HMaster(4106): Remote procedure done, pid=13
2024-12-13T17:21:20,380 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12
2024-12-13T17:21:20,381 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 200 msec
2024-12-13T17:21:20,383 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=hbase:namespace in 208 msec
2024-12-13T17:21:20,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:21:20,738 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:21:20,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
2024-12-13T17:21:21,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:21:21,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:21:21,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
2024-12-13T17:21:22,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:21:22,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:21:22,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
2024-12-13T17:21:23,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:21:23,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:21:23,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
2024-12-13T17:21:24,597 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 4ae54d53dadb6f7d5f47a16f2daa01e3, had cached 0 bytes from a total of 23930
2024-12-13T17:21:24,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:21:24,741 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:24,910 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:25,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:25,742 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:21:25,910 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:26,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:26,743 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:26,911 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:27,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:27,743 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:27,912 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:28,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:21:28,744 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:28,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:29,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:29,745 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:29,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
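What the repeated warning means: the stack shows RecoverLeaseFSUtils invoking DistributedFileSystem.isFileClosed through java.lang.reflect.Method while polling for WAL lease recovery, and every poll fails because the test's HDFS client has already been closed, so the call surfaces as an InvocationTargetException wrapping IOException("Filesystem closed"), retried once per second per WAL by the Close-WAL-Writer-0 thread. The standalone Java sketch below reproduces only that reflection-wrapping behavior; FakeDfs is a hypothetical stand-in, not Hadoop's DistributedFileSystem, and the reflective lookup merely mirrors what the trace shows HBase doing (reportedly for compatibility across Hadoop versions).

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    public class ReflectiveIsFileClosedSketch {
        // Hypothetical stand-in for DistributedFileSystem: throws once the client is closed.
        static class FakeDfs {
            private boolean open = true;
            void close() { open = false; }
            public boolean isFileClosed(String path) throws IOException {
                if (!open) {
                    // Same message DFSClient.checkOpen produces in the log above.
                    throw new IOException("Filesystem closed");
                }
                return true;
            }
        }

        public static void main(String[] args) throws Exception {
            FakeDfs fs = new FakeDfs();
            fs.close(); // shutdown closed the filesystem before lease recovery finished
            Method isFileClosed = FakeDfs.class.getMethod("isFileClosed", String.class);
            try {
                isFileClosed.invoke(fs, "/user/jenkins/some.wal"); // hypothetical path
            } catch (InvocationTargetException e) {
                // Reflection wraps the IOException, which is why the log reads
                // "InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed".
                System.out.println("wrapped cause: " + e.getCause());
            }
        }
    }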
2024-12-13T17:21:30,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12
2024-12-13T17:21:30,179 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: hbase:namespace, procId: 12 completed
2024-12-13T17:21:30,187 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-13T17:21:30,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-13T17:21:30,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14
2024-12-13T17:21:30,189 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-13T17:21:30,190 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-13T17:21:30,190 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-13T17:21:30,342 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bcc0a2319104,44309,1734110467950
2024-12-13T17:21:30,343 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44309 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15
2024-12-13T17:21:30,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1.
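The pid=14/pid=15 records above trace the master's two-level procedure execution: FlushTableProcedure (pid=14) moves through FLUSH_TABLE_PREPARE and FLUSH_TABLE_FLUSH_REGIONS, spawns one FlushRegionProcedure subprocedure (pid=15), dispatches it to the region server, and only finishes once the child reports back. A rough analogue of that parent-suspends-until-children-complete pattern, sketched with plain CompletableFuture (illustrative only; this is not the ProcedureV2 API):

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class ParentChildProcedureSketch {
        public static void main(String[] args) {
            ExecutorService workers = Executors.newFixedThreadPool(2); // stands in for the PEWorker pool

            // Child "procedure" (pid=15): the per-region flush, dispatched to a worker.
            CompletableFuture<Void> flushRegion = CompletableFuture.runAsync(
                () -> System.out.println("pid=15: flush region, report SUCCESS"), workers);

            // Parent "procedure" (pid=14): waits for all subprocedures, then completes,
            // mirroring "Finished subprocedure pid=15, resume processing ppid=14".
            CompletableFuture<Void> flushTable = CompletableFuture.allOf(flushRegion)
                .thenRun(() -> System.out.println("pid=14: all subprocedures done, SUCCESS"));

            flushTable.join();
            workers.shutdown();
        }
    }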
2024-12-13T17:21:30,345 INFO [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 45d204d19da67c6c7eecef911c799fb1 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-13T17:21:30,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/.tmp/info/97397d95ceff4af2bf109158a7c1c64b is 1080, key is row0001/info:/1734110490182/Put/seqid=0
2024-12-13T17:21:30,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36517 is added to blk_1073741840_1016 (size=6033)
2024-12-13T17:21:30,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40217 is added to blk_1073741840_1016 (size=6033)
2024-12-13T17:21:30,373 INFO [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/.tmp/info/97397d95ceff4af2bf109158a7c1c64b
2024-12-13T17:21:30,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/.tmp/info/97397d95ceff4af2bf109158a7c1c64b as hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/info/97397d95ceff4af2bf109158a7c1c64b
2024-12-13T17:21:30,384 INFO [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/info/97397d95ceff4af2bf109158a7c1c64b, entries=1, sequenceid=5, filesize=5.9 K
2024-12-13T17:21:30,385 INFO [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 45d204d19da67c6c7eecef911c799fb1 in 40ms, sequenceid=5, compaction requested=false
2024-12-13T17:21:30,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 45d204d19da67c6c7eecef911c799fb1:
2024-12-13T17:21:30,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1.
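The flush records above follow HBase's write-to-temp-then-commit pattern: the memstore is flushed to an HFile under the region's .tmp directory (DefaultStoreFlusher), then committed by moving it into the column family's info directory (HRegionFileSystem), so readers only ever observe complete files. A minimal sketch of the same pattern with local java.nio (illustrative only; HBase does this via HRegionFileSystem on HDFS, and the file name below is simply the one from the log reused as an example):

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    public class TmpCommitSketch {
        public static void main(String[] args) throws IOException {
            Path storeDir = Files.createTempDirectory("info"); // stands in for .../45d204d.../info
            Path tmpDir = Files.createTempDirectory("tmp");    // stands in for .../45d204d.../.tmp
            Path tmpFile = tmpDir.resolve("97397d95ceff4af2bf109158a7c1c64b");

            // 1. Flush the in-memory data to a temporary file; readers never look here.
            Files.write(tmpFile, "row0001/info:/...".getBytes(StandardCharsets.UTF_8));

            // 2. Commit: atomically move the finished file into the live store directory,
            //    so a reader sees either no file or a complete one (works here because
            //    both temp directories live on the same local file store).
            Path committed = storeDir.resolve(tmpFile.getFileName());
            Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);
            System.out.println("committed " + committed);
        }
    }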
2024-12-13T17:21:30,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15
2024-12-13T17:21:30,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] master.HMaster(4106): Remote procedure done, pid=15
2024-12-13T17:21:30,389 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14
2024-12-13T17:21:30,389 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 196 msec
2024-12-13T17:21:30,391 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 202 msec
[... the same lease-recovery WARN and "Filesystem closed" stack trace resume at 17:21:30,703 and keep cycling through the three WAL files (meta WAL, region server WAL, MasterData WAL) once per second each through 17:21:32,746, where this excerpt ends mid-trace ...]
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:32,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:33,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:33,747 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:33,916 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:34,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:34,748 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:21:34,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:35,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:35,748 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:35,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:36,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:36,749 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:36,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:37,440 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-13T17:21:37,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:37,750 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:37,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:38,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:21:38,750 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:38,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:39,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:39,751 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:39,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:40,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-13T17:21:40,191 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 14 completed 2024-12-13T17:21:40,200 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-13T17:21:40,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-13T17:21:40,202 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-13T17:21:40,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-13T17:21:40,204 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-13T17:21:40,204 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-13T17:21:40,356 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bcc0a2319104,44309,1734110467950 2024-12-13T17:21:40,357 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44309 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-13T17:21:40,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1. 
2024-12-13T17:21:40,357 INFO [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 45d204d19da67c6c7eecef911c799fb1 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-13T17:21:40,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/.tmp/info/611b9b2d6f444a95b1eb2b4f782e76e1 is 1080, key is row0002/info:/1734110500192/Put/seqid=0
2024-12-13T17:21:40,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40217 is added to blk_1073741841_1017 (size=6033)
2024-12-13T17:21:40,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36517 is added to blk_1073741841_1017 (size=6033)
2024-12-13T17:21:40,366 INFO [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/.tmp/info/611b9b2d6f444a95b1eb2b4f782e76e1
2024-12-13T17:21:40,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/.tmp/info/611b9b2d6f444a95b1eb2b4f782e76e1 as hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/info/611b9b2d6f444a95b1eb2b4f782e76e1
2024-12-13T17:21:40,378 INFO [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/info/611b9b2d6f444a95b1eb2b4f782e76e1, entries=1, sequenceid=9, filesize=5.9 K
2024-12-13T17:21:40,379 INFO [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 45d204d19da67c6c7eecef911c799fb1 in 22ms, sequenceid=9, compaction requested=false
2024-12-13T17:21:40,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 45d204d19da67c6c7eecef911c799fb1:
2024-12-13T17:21:40,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1.
2024-12-13T17:21:40,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17
2024-12-13T17:21:40,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] master.HMaster(4106): Remote procedure done, pid=17
2024-12-13T17:21:40,382 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16
2024-12-13T17:21:40,382 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 177 msec
2024-12-13T17:21:40,384 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 183 msec
2024-12-13T17:21:40,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:21:40,751 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:21:40,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:21:41,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:21:41,752 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:21:41,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:21:42,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:21:42,752 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:21:42,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:21:43,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:21:43,753 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:21:43,855 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-13T17:21:43,857 INFO [RS-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54490, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService
2024-12-13T17:21:43,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:21:44,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:44,754 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:44,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:45,714 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:45,755 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:21:45,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:46,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:46,756 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:46,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:47,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:47,756 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:47,926 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:48,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
2024-12-13T17:21:50,096 INFO [master/bcc0a2319104:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-13T17:21:50,096 INFO [master/bcc0a2319104:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-12-13T17:21:50,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16
2024-12-13T17:21:50,204 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 16 completed
2024-12-13T17:21:50,206 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C44309%2C1734110467950.1734110510206
2024-12-13T17:21:50,213 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/WALs/bcc0a2319104,44309,1734110467950/bcc0a2319104%2C44309%2C1734110467950.1734110469026 with entries=13, filesize=6.41 KB; new WAL /user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/WALs/bcc0a2319104,44309,1734110467950/bcc0a2319104%2C44309%2C1734110467950.1734110510206
2024-12-13T17:21:50,213 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43705:43705),(127.0.0.1/127.0.0.1:39017:39017)]
2024-12-13T17:21:50,213 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/WALs/bcc0a2319104,44309,1734110467950/bcc0a2319104%2C44309%2C1734110467950.1734110469026 is not closed yet, will try archiving it next time
2024-12-13T17:21:50,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36517 is added to blk_1073741833_1009 (size=6574)
2024-12-13T17:21:50,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40217 is added to blk_1073741833_1009 (size=6574)
2024-12-13T17:21:50,216 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-13T17:21:50,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-13T17:21:50,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18
2024-12-13T17:21:50,218 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-13T17:21:50,218 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-13T17:21:50,218 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-13T17:21:50,370 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bcc0a2319104,44309,1734110467950
2024-12-13T17:21:50,370 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44309 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19
2024-12-13T17:21:50,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1.
2024-12-13T17:21:50,371 INFO [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 45d204d19da67c6c7eecef911c799fb1 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-13T17:21:50,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/.tmp/info/899899b0068043e6b4b991d43a18493f is 1080, key is row0003/info:/1734110510205/Put/seqid=0
2024-12-13T17:21:50,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36517 is added to blk_1073741843_1019 (size=6033)
2024-12-13T17:21:50,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40217 is added to blk_1073741843_1019 (size=6033)
2024-12-13T17:21:50,387 INFO [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/.tmp/info/899899b0068043e6b4b991d43a18493f
2024-12-13T17:21:50,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/.tmp/info/899899b0068043e6b4b991d43a18493f as hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/info/899899b0068043e6b4b991d43a18493f
2024-12-13T17:21:50,400 INFO [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/info/899899b0068043e6b4b991d43a18493f, entries=1, sequenceid=13, filesize=5.9 K
2024-12-13T17:21:50,401 INFO [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 45d204d19da67c6c7eecef911c799fb1 in 30ms, sequenceid=13, compaction requested=true
2024-12-13T17:21:50,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 45d204d19da67c6c7eecef911c799fb1:
2024-12-13T17:21:50,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1.
2024-12-13T17:21:50,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19
2024-12-13T17:21:50,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] master.HMaster(4106): Remote procedure done, pid=19
2024-12-13T17:21:50,404 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18
2024-12-13T17:21:50,404 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 184 msec
2024-12-13T17:21:50,406 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 189 msec
2024-12-13T17:21:54,937 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region aa1510980ff757e5dd155751d8c08d6c, had cached 0 bytes from a total of 5037
2024-12-13T17:21:55,508 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 45d204d19da67c6c7eecef911c799fb1, had cached 0 bytes from a total of 18099
2024-12-13T17:21:57,723 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:57,763 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:57,936 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:58,724 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:58,763 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:21:58,937 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:59,724 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:59,764 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:21:59,937 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:00,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-13T17:22:00,219 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 18 completed 2024-12-13T17:22:00,219 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T17:22:00,220 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T17:22:00,220 DEBUG [Time-limited test {}] regionserver.HStore(1540): 45d204d19da67c6c7eecef911c799fb1/info is initiating minor compaction (all files) 2024-12-13T17:22:00,220 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-13T17:22:00,220 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-13T17:22:00,221 INFO [Time-limited test {}] regionserver.HRegion(2351): Starting compaction of 45d204d19da67c6c7eecef911c799fb1/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1. 
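For orientation, the flush-then-compact sequence the entries above record can be driven from a client through the HBase Admin API. A minimal sketch, assuming an HBase 2.x client on the classpath: the table name comes from this log, while the class name and polling loop are illustrative; the server-side policy (SortedCompactionPolicy / ExploringCompactionPolicy above) still decides which store files participate.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            TableName table = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Request a (minor) compaction of all stores in the table; the
                // call is asynchronous and the region server's compaction policy
                // selects the actual file set.
                admin.compact(table);
                // Poll the server-reported compaction state until it returns to NONE.
                while (admin.getCompactionState(table) != CompactionState.NONE) {
                    Thread.sleep(100);
                }
            }
        }
    }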
2024-12-13T17:22:00,221 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/info/97397d95ceff4af2bf109158a7c1c64b, hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/info/611b9b2d6f444a95b1eb2b4f782e76e1, hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/info/899899b0068043e6b4b991d43a18493f] into tmpdir=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/.tmp, totalSize=17.7 K
2024-12-13T17:22:00,221 DEBUG [Time-limited test {}] compactions.Compactor(224): Compacting 97397d95ceff4af2bf109158a7c1c64b, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1734110490182
2024-12-13T17:22:00,221 DEBUG [Time-limited test {}] compactions.Compactor(224): Compacting 611b9b2d6f444a95b1eb2b4f782e76e1, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1734110500192
2024-12-13T17:22:00,222 DEBUG [Time-limited test {}] compactions.Compactor(224): Compacting 899899b0068043e6b4b991d43a18493f, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1734110510205
2024-12-13T17:22:00,233 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 45d204d19da67c6c7eecef911c799fb1#info#compaction#32 average throughput is 3.08 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
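The throughput bounds the controller prints (higher 100 MB/s, lower 50 MB/s) come from configuration. A hedged sketch of setting them programmatically, assuming the keys PressureAwareCompactionThroughputController reads in recent HBase releases (values are bytes per second; verify the key names against your HBase version):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ThroughputBoundsSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed keys; the controller tunes the effective limit between
            // these two bounds based on memstore pressure.
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024); // 100 MB/s
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);   // 50 MB/s
            System.out.println("higher=" + conf.getLong("hbase.hstore.compaction.throughput.higher.bound", 0));
        }
    }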
2024-12-13T17:22:00,234 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/.tmp/info/8a31564e5c6e488fb8786d423d72eff3 is 1080, key is row0001/info:/1734110490182/Put/seqid=0
2024-12-13T17:22:00,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36517 is added to blk_1073741844_1020 (size=8296)
2024-12-13T17:22:00,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40217 is added to blk_1073741844_1020 (size=8296)
2024-12-13T17:22:00,247 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/.tmp/info/8a31564e5c6e488fb8786d423d72eff3 as hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/info/8a31564e5c6e488fb8786d423d72eff3
2024-12-13T17:22:00,253 INFO [Time-limited test {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 45d204d19da67c6c7eecef911c799fb1/info of 45d204d19da67c6c7eecef911c799fb1 into 8a31564e5c6e488fb8786d423d72eff3(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-13T17:22:00,253 DEBUG [Time-limited test {}] regionserver.HRegion(2381): Compaction status journal for 45d204d19da67c6c7eecef911c799fb1:
2024-12-13T17:22:00,255 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C44309%2C1734110467950.1734110520255
2024-12-13T17:22:00,265 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/WALs/bcc0a2319104,44309,1734110467950/bcc0a2319104%2C44309%2C1734110467950.1734110510206 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/WALs/bcc0a2319104,44309,1734110467950/bcc0a2319104%2C44309%2C1734110467950.1734110520255
2024-12-13T17:22:00,265 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43705:43705),(127.0.0.1/127.0.0.1:39017:39017)]
2024-12-13T17:22:00,265 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/WALs/bcc0a2319104,44309,1734110467950/bcc0a2319104%2C44309%2C1734110467950.1734110510206 is not closed yet, will try archiving it next time
2024-12-13T17:22:00,266 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/WALs/bcc0a2319104,44309,1734110467950/bcc0a2319104%2C44309%2C1734110467950.1734110469026 to hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/oldWALs/bcc0a2319104%2C44309%2C1734110467950.1734110469026
2024-12-13T17:22:00,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40217 is added to blk_1073741842_1018 (size=2520)
2024-12-13T17:22:00,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36517 is added to blk_1073741842_1018 (size=2520)
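A WAL roll like the one recorded above can also be requested explicitly from a client. A minimal sketch using Admin.rollWALWriter; the host,port,startcode string is taken from this log and everything else is illustrative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollWalSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Identify the region server by host,port,startcode (from the log above).
                ServerName rs = ServerName.valueOf("bcc0a2319104,44309,1734110467950");
                // Ask that server to close its current WAL and open a new one; the
                // old file moves to oldWALs once all of its edits are persisted.
                admin.rollWALWriter(rs);
            }
        }
    }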
2024-12-13T17:22:00,269 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-13T17:22:00,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-13T17:22:00,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20
2024-12-13T17:22:00,270 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-13T17:22:00,271 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-13T17:22:00,271 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-13T17:22:00,423 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bcc0a2319104,44309,1734110467950
2024-12-13T17:22:00,424 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44309 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21
2024-12-13T17:22:00,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1.
2024-12-13T17:22:00,424 INFO [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 45d204d19da67c6c7eecef911c799fb1 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-13T17:22:00,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/.tmp/info/4c27e588187c4af5beb605a999827548 is 1080, key is row0000/info:/1734110520254/Put/seqid=0
2024-12-13T17:22:00,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36517 is added to blk_1073741846_1022 (size=6033)
2024-12-13T17:22:00,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40217 is added to blk_1073741846_1022 (size=6033)
2024-12-13T17:22:00,433 INFO [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/.tmp/info/4c27e588187c4af5beb605a999827548
2024-12-13T17:22:00,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/.tmp/info/4c27e588187c4af5beb605a999827548 as hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/info/4c27e588187c4af5beb605a999827548
2024-12-13T17:22:00,444 INFO [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/info/4c27e588187c4af5beb605a999827548, entries=1, sequenceid=18, filesize=5.9 K
2024-12-13T17:22:00,445 INFO [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 45d204d19da67c6c7eecef911c799fb1 in 21ms, sequenceid=18, compaction requested=false
2024-12-13T17:22:00,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 45d204d19da67c6c7eecef911c799fb1:
2024-12-13T17:22:00,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1.
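The pid=20/pid=21 sequence above is the master-side FlushTableProcedure fanning out one FlushRegionProcedure per region; from a client it is a single call. A minimal sketch, assuming default client configuration (the class name is illustrative):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushExample {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Blocks until the master reports the flush procedure finished,
                // i.e. each region's memstore has been written out as an HFile
                // (the .tmp/info file committed in the entries above).
                admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
            }
        }
    }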
2024-12-13T17:22:00,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-13T17:22:00,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-13T17:22:00,448 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-12-13T17:22:00,448 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 176 msec 2024-12-13T17:22:00,450 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 180 msec 2024-12-13T17:22:00,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:00,764 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:00,938 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:01,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:01,765 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:01,938 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:22:02,726 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:02,765 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
2024-12-13T17:22:02,938 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
2024-12-13T17:22:03,727 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:22:03,766 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:22:03,939 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
2024-12-13T17:22:04,728 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:22:04,767 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:22:04,939 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
2024-12-13T17:22:05,728 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:22:05,768 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:22:05,940 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
2024-12-13T17:22:06,728 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:22:06,768 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:22:06,940 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
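The timestamps above tick at almost exactly one second per file (...02,726, ...03,727, ...04,728 for the meta WAL, and likewise for the other two), the signature of a poll loop around lease recovery. A hedged sketch of such a loop, using only the public DistributedFileSystem calls named in the trace; the loop structure, sleep interval, and timeout value are illustrative assumptions, not HBase's RecoverLeaseFSUtils code:

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Illustrative poll loop; the 1 s sleep matches the cadence of the WARN
// timestamps above, the deadline is invented for the sketch.
public final class LeaseRecoverySketch {
  static boolean recoverLease(DistributedFileSystem dfs, Path wal)
      throws Exception {
    long deadline = System.currentTimeMillis() + 900_000L; // assumed cap
    if (dfs.recoverLease(wal)) {
      return true; // NameNode granted recovery immediately
    }
    while (System.currentTimeMillis() < deadline) {
      Thread.sleep(1_000L); // one probe per second, as in the log
      if (dfs.isFileClosed(wal)) { // the call failing in the WARNs above
        return true;
      }
    }
    return false; // file still not closed when the deadline passed
  }
}
```

With the test's DFSClient already closed, every probe in this run fails the same way, which is why the WARN repeats until shutdown rather than converging.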
2024-12-13T17:22:07,440 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
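The DEBUG line above is HBaseTestingUtility's FsDatasetAsyncDiskServiceFixer failing to find a private threadGroup field that newer Hadoop releases no longer have, so it logs and skips (HBASE-27595). A minimal sketch of that kind of guarded field access, assuming only the field name from the message; the enclosing class and what the fixer does with the group are not shown in the log:

```java
import java.lang.reflect.Field;

// Illustrative guarded reflection; the real fixer lives in
// HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer (see HBASE-27595).
public final class ThreadGroupLookupSketch {
  static ThreadGroup tryReadThreadGroup(Object asyncDiskService) {
    try {
      Field f = asyncDiskService.getClass().getDeclaredField("threadGroup");
      f.setAccessible(true);
      return (ThreadGroup) f.get(asyncDiskService);
    } catch (NoSuchFieldException e) {
      // Hadoop > 3.2.3 / 3.3.4 removed the field; log and carry on,
      // which is exactly what the DEBUG line above records.
      return null;
    } catch (IllegalAccessException e) {
      return null;
    }
  }
}
```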
2024-12-13T17:22:07,729 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:22:07,769 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:22:07,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
2024-12-13T17:22:08,729 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:22:08,769 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:22:08,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
2024-12-13T17:22:09,588 DEBUG [master/bcc0a2319104:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region aa1510980ff757e5dd155751d8c08d6c changed from -1.0 to 0.0, refreshing cache
2024-12-13T17:22:09,598 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 4ae54d53dadb6f7d5f47a16f2daa01e3, had cached 0 bytes from a total of 23930
2024-12-13T17:22:09,730 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:22:09,770 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:22:09,942 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
2024-12-13T17:22:10,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20
2024-12-13T17:22:10,272 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 20 completed
2024-12-13T17:22:10,274 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C44309%2C1734110467950.1734110530274
2024-12-13T17:22:10,283 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/WALs/bcc0a2319104,44309,1734110467950/bcc0a2319104%2C44309%2C1734110467950.1734110520255 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/WALs/bcc0a2319104,44309,1734110467950/bcc0a2319104%2C44309%2C1734110467950.1734110530274
2024-12-13T17:22:10,283 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43705:43705),(127.0.0.1/127.0.0.1:39017:39017)]
2024-12-13T17:22:10,283 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/WALs/bcc0a2319104,44309,1734110467950/bcc0a2319104%2C44309%2C1734110467950.1734110520255 is not closed yet, will try archiving it next time
2024-12-13T17:22:10,283 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/WALs/bcc0a2319104,44309,1734110467950/bcc0a2319104%2C44309%2C1734110467950.1734110510206 to hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/oldWALs/bcc0a2319104%2C44309%2C1734110467950.1734110510206
2024-12-13T17:22:10,283 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster
2024-12-13T17:22:10,283 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-13T17:22:10,284 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5e46a98b to 127.0.0.1:51199
2024-12-13T17:22:10,284 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-13T17:22:10,284 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-12-13T17:22:10,284 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1150642057, stopped=false
2024-12-13T17:22:10,284 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=bcc0a2319104,33541,1734110467458
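Among the records above, the roll at 17:22:10,283 shows the WAL lifecycle in three lines: a new timestamped writer is created, the immediately preceding file is left in place because it "is not closed yet", and an older, fully closed predecessor is moved to oldWALs. A rough sketch of that rename-into-archive shape, with invented method names; only the WALs/<server>/<server>.<timestamp> to oldWALs/ layout comes from the log:

```java
import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative only; not AbstractFSWAL. The real code swaps writers under
// synchronization and retries archiving of still-open predecessors later.
public final class WalRollSketch {
  static Path roll(FileSystem fs, Path walDir, Path oldWalDir,
      String serverName, Path closedPredecessor) throws IOException {
    // New WAL named <server>.<timestamp>, matching the names in the log.
    Path next = new Path(walDir, serverName + "." + System.currentTimeMillis());
    fs.create(next).close(); // stand-in for creating the new writer
    // A predecessor with an open pipeline would be skipped here and
    // retried ("will try archiving it next time"); a closed one is
    // renamed into the archive directory.
    if (closedPredecessor != null
        && !fs.rename(closedPredecessor, new Path(oldWalDir, closedPredecessor.getName()))) {
      throw new IOException("Failed to archive " + closedPredecessor);
    }
    return next;
  }
}
```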
2024-12-13T17:22:10,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40217 is added to blk_1073741845_1021 (size=2026)
2024-12-13T17:22:10,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36517 is added to blk_1073741845_1021 (size=2026)
2024-12-13T17:22:10,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-13T17:22:10,294 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping
2024-12-13T17:22:10,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44309-0x1002068f1f20001, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-13T17:22:10,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:22:10,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44309-0x1002068f1f20001, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:22:10,294 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-13T17:22:10,294 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'bcc0a2319104,44309,1734110467950' *****
2024-12-13T17:22:10,294 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested
2024-12-13T17:22:10,294 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-13T17:22:10,294 INFO [RS:0;bcc0a2319104:44309 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-13T17:22:10,294 INFO [RS:0;bcc0a2319104:44309 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-13T17:22:10,294 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting
2024-12-13T17:22:10,295 INFO [RS:0;bcc0a2319104:44309 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-13T17:22:10,295 INFO [RS:0;bcc0a2319104:44309 {}] regionserver.HRegionServer(3579): Received CLOSE for aa1510980ff757e5dd155751d8c08d6c
2024-12-13T17:22:10,295 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44309-0x1002068f1f20001, quorum=127.0.0.1:51199, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-13T17:22:10,295 INFO [RS:0;bcc0a2319104:44309 {}] regionserver.HRegionServer(3579): Received CLOSE for 45d204d19da67c6c7eecef911c799fb1
2024-12-13T17:22:10,295 INFO [RS:0;bcc0a2319104:44309 {}] regionserver.HRegionServer(1224): stopping server bcc0a2319104,44309,1734110467950
2024-12-13T17:22:10,295 DEBUG [RS:0;bcc0a2319104:44309 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-13T17:22:10,295 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing aa1510980ff757e5dd155751d8c08d6c, disabling compactions & flushes
2024-12-13T17:22:10,295 INFO [RS:0;bcc0a2319104:44309 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-13T17:22:10,295 INFO [RS:0;bcc0a2319104:44309 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-13T17:22:10,295 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734110469554.aa1510980ff757e5dd155751d8c08d6c.
2024-12-13T17:22:10,295 INFO [RS:0;bcc0a2319104:44309 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-13T17:22:10,295 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734110469554.aa1510980ff757e5dd155751d8c08d6c.
2024-12-13T17:22:10,295 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734110469554.aa1510980ff757e5dd155751d8c08d6c. after waiting 0 ms
2024-12-13T17:22:10,295 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734110469554.aa1510980ff757e5dd155751d8c08d6c.
2024-12-13T17:22:10,295 INFO [RS:0;bcc0a2319104:44309 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740
2024-12-13T17:22:10,295 INFO [RS:0;bcc0a2319104:44309 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close
2024-12-13T17:22:10,295 DEBUG [RS:0;bcc0a2319104:44309 {}] regionserver.HRegionServer(1603): Online Regions={aa1510980ff757e5dd155751d8c08d6c=hbase:namespace,,1734110469554.aa1510980ff757e5dd155751d8c08d6c., 1588230740=hbase:meta,,1.1588230740, 45d204d19da67c6c7eecef911c799fb1=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1.}
2024-12-13T17:22:10,295 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-13T17:22:10,295 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-13T17:22:10,295 DEBUG [RS:0;bcc0a2319104:44309 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 45d204d19da67c6c7eecef911c799fb1, aa1510980ff757e5dd155751d8c08d6c
2024-12-13T17:22:10,295 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-13T17:22:10,295 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-13T17:22:10,295 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-13T17:22:10,296 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=3.05 KB heapSize=5.55 KB
2024-12-13T17:22:10,299 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/hbase/namespace/aa1510980ff757e5dd155751d8c08d6c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-13T17:22:10,299 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1734110469554.aa1510980ff757e5dd155751d8c08d6c.
2024-12-13T17:22:10,299 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for aa1510980ff757e5dd155751d8c08d6c:
2024-12-13T17:22:10,299 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1734110469554.aa1510980ff757e5dd155751d8c08d6c.
2024-12-13T17:22:10,299 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 45d204d19da67c6c7eecef911c799fb1, disabling compactions & flushes
2024-12-13T17:22:10,299 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1.
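The close journal above always runs in the same order: "Closing <region>, disabling compactions & flushes", then "Waiting without time limit for close lock", "Acquired close lock ... after waiting 0 ms", "Updates disabled", a final memstore flush if the region is dirty, and a recovered.edits/<seqid>.seqid marker once the flush lands. A schematic sketch of that ordering under a write lock; the lock field and helper methods are stand-ins invented for the sketch, not HBase APIs:

```java
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Illustrative ordering only, matching the close journal above: take the
// close (write) lock so in-flight writers drain, disable new updates,
// flush what remains in the memstore, then record the max sequence id.
public final class RegionCloseSketch {
  private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();
  private volatile boolean updatesDisabled;

  void close() {
    closeLock.writeLock().lock(); // "Acquired close lock ... after waiting 0 ms"
    try {
      updatesDisabled = true;     // "Updates disabled for region ..."
      flushRemainingEdits();      // "Flushing ... column families, dataSize=..."
      writeMaxSeqIdMarker();      // "Wrote file=.../recovered.edits/N.seqid"
    } finally {
      closeLock.writeLock().unlock();
    }
  }

  private void flushRemainingEdits() { /* stand-in */ }
  private void writeMaxSeqIdMarker() { /* stand-in */ }
}
```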
2024-12-13T17:22:10,299 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1.
2024-12-13T17:22:10,299 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1. after waiting 0 ms
2024-12-13T17:22:10,299 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1.
2024-12-13T17:22:10,300 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 45d204d19da67c6c7eecef911c799fb1 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-13T17:22:10,303 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/.tmp/info/c014e197a0634247918deab6bcc9d4d7 is 1080, key is row0001/info:/1734110530272/Put/seqid=0
2024-12-13T17:22:10,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40217 is added to blk_1073741848_1024 (size=6033)
2024-12-13T17:22:10,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36517 is added to blk_1073741848_1024 (size=6033)
2024-12-13T17:22:10,308 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/.tmp/info/c014e197a0634247918deab6bcc9d4d7
2024-12-13T17:22:10,314 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/.tmp/info/c014e197a0634247918deab6bcc9d4d7 as hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/info/c014e197a0634247918deab6bcc9d4d7
2024-12-13T17:22:10,317 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/hbase/meta/1588230740/.tmp/info/a0d3629b91d54e65b4e07b94a595dc50 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1./info:regioninfo/1734110470520/Put/seqid=0
2024-12-13T17:22:10,320 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/info/c014e197a0634247918deab6bcc9d4d7, entries=1, sequenceid=22, filesize=5.9 K
2024-12-13T17:22:10,321 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 45d204d19da67c6c7eecef911c799fb1 in 22ms, sequenceid=22, compaction requested=true
2024-12-13T17:22:10,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40217 is added to blk_1073741849_1025 (size=8430)
2024-12-13T17:22:10,321 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/info/97397d95ceff4af2bf109158a7c1c64b, hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/info/611b9b2d6f444a95b1eb2b4f782e76e1, hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/info/899899b0068043e6b4b991d43a18493f] to archive
2024-12-13T17:22:10,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36517 is added to blk_1073741849_1025 (size=8430)
2024-12-13T17:22:10,322 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.79 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/hbase/meta/1588230740/.tmp/info/a0d3629b91d54e65b4e07b94a595dc50
2024-12-13T17:22:10,322 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1.-1 {}] backup.HFileArchiver(363): Archiving compacted files.
2024-12-13T17:22:10,324 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/info/97397d95ceff4af2bf109158a7c1c64b to hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/info/97397d95ceff4af2bf109158a7c1c64b
2024-12-13T17:22:10,324 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/info/611b9b2d6f444a95b1eb2b4f782e76e1 to hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/info/611b9b2d6f444a95b1eb2b4f782e76e1
2024-12-13T17:22:10,324 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/info/899899b0068043e6b4b991d43a18493f to hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/info/899899b0068043e6b4b991d43a18493f
2024-12-13T17:22:10,328 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/45d204d19da67c6c7eecef911c799fb1/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1
2024-12-13T17:22:10,329 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1.
2024-12-13T17:22:10,329 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 45d204d19da67c6c7eecef911c799fb1:
2024-12-13T17:22:10,329 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1734110470151.45d204d19da67c6c7eecef911c799fb1.
2024-12-13T17:22:10,341 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/hbase/meta/1588230740/.tmp/table/60fb302d655341e2ac8e024765610c25 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1734110470527/Put/seqid=0
2024-12-13T17:22:10,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40217 is added to blk_1073741850_1026 (size=5532)
2024-12-13T17:22:10,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36517 is added to blk_1073741850_1026 (size=5532)
2024-12-13T17:22:10,346 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=264 B at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/hbase/meta/1588230740/.tmp/table/60fb302d655341e2ac8e024765610c25
2024-12-13T17:22:10,352 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/hbase/meta/1588230740/.tmp/info/a0d3629b91d54e65b4e07b94a595dc50 as hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/hbase/meta/1588230740/info/a0d3629b91d54e65b4e07b94a595dc50
2024-12-13T17:22:10,358 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/hbase/meta/1588230740/info/a0d3629b91d54e65b4e07b94a595dc50, entries=20, sequenceid=14, filesize=8.2 K
2024-12-13T17:22:10,358 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/hbase/meta/1588230740/.tmp/table/60fb302d655341e2ac8e024765610c25 as hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/hbase/meta/1588230740/table/60fb302d655341e2ac8e024765610c25
2024-12-13T17:22:10,364 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/hbase/meta/1588230740/table/60fb302d655341e2ac8e024765610c25, entries=4, sequenceid=14, filesize=5.4 K
2024-12-13T17:22:10,365 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~3.05 KB/3122, heapSize ~5.27 KB/5400, currentSize=0 B/0 for 1588230740 in 70ms, sequenceid=14, compaction requested=false
2024-12-13T17:22:10,372 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/data/hbase/meta/1588230740/recovered.edits/17.seqid, newMaxSeqId=17, maxSeqId=1
2024-12-13T17:22:10,373 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-13T17:22:10,373 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-13T17:22:10,373 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-13T17:22:10,373 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-13T17:22:10,496 INFO [RS:0;bcc0a2319104:44309 {}] regionserver.HRegionServer(1250): stopping server bcc0a2319104,44309,1734110467950; all regions closed.
2024-12-13T17:22:10,496 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/WALs/bcc0a2319104,44309,1734110467950
2024-12-13T17:22:10,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36517 is added to blk_1073741834_1010 (size=4570)
2024-12-13T17:22:10,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40217 is added to blk_1073741834_1010 (size=4570)
2024-12-13T17:22:10,503 DEBUG [RS:0;bcc0a2319104:44309 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/oldWALs
2024-12-13T17:22:10,503 INFO [RS:0;bcc0a2319104:44309 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog bcc0a2319104%2C44309%2C1734110467950.meta:.meta(num 1734110469456)
2024-12-13T17:22:10,503 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/WALs/bcc0a2319104,44309,1734110467950
2024-12-13T17:22:10,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40217 is added to blk_1073741847_1023 (size=1545)
2024-12-13T17:22:10,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36517 is added to blk_1073741847_1023 (size=1545)
2024-12-13T17:22:10,511 DEBUG [RS:0;bcc0a2319104:44309 {}] wal.AbstractFSWAL(1071): Moved 2 WAL file(s) to /user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/oldWALs
2024-12-13T17:22:10,511 INFO [RS:0;bcc0a2319104:44309 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog bcc0a2319104%2C44309%2C1734110467950:(num 1734110530274)
2024-12-13T17:22:10,511 DEBUG [RS:0;bcc0a2319104:44309 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-13T17:22:10,511 INFO [RS:0;bcc0a2319104:44309 {}] regionserver.LeaseManager(133): Closed leases
2024-12-13T17:22:10,512 INFO [RS:0;bcc0a2319104:44309 {}] hbase.ChoreService(370): Chore service for: regionserver/bcc0a2319104:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown
2024-12-13T17:22:10,512 INFO [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-13T17:22:10,513 INFO [RS:0;bcc0a2319104:44309 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:44309
2024-12-13T17:22:10,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-13T17:22:10,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44309-0x1002068f1f20001, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/bcc0a2319104,44309,1734110467950
2024-12-13T17:22:10,527 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [bcc0a2319104,44309,1734110467950]
2024-12-13T17:22:10,527 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing bcc0a2319104,44309,1734110467950; numProcessing=1
2024-12-13T17:22:10,535 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/bcc0a2319104,44309,1734110467950 already deleted, retry=false
2024-12-13T17:22:10,535 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; bcc0a2319104,44309,1734110467950 expired; onlineServers=0
2024-12-13T17:22:10,536 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'bcc0a2319104,33541,1734110467458' *****
2024-12-13T17:22:10,536 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-13T17:22:10,536 DEBUG [M:0;bcc0a2319104:33541 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14273897, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bcc0a2319104/172.17.0.2:0
2024-12-13T17:22:10,536 INFO [M:0;bcc0a2319104:33541 {}] regionserver.HRegionServer(1224): stopping server bcc0a2319104,33541,1734110467458
2024-12-13T17:22:10,536 INFO [M:0;bcc0a2319104:33541 {}] regionserver.HRegionServer(1250): stopping server bcc0a2319104,33541,1734110467458; all regions closed.
2024-12-13T17:22:10,536 DEBUG [M:0;bcc0a2319104:33541 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-13T17:22:10,536 DEBUG [M:0;bcc0a2319104:33541 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-13T17:22:10,536 DEBUG [M:0;bcc0a2319104:33541 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-13T17:22:10,536 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-13T17:22:10,536 DEBUG [master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.large.0-1734110468804 {}] cleaner.HFileCleaner(306): Exit Thread[master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.large.0-1734110468804,5,FailOnTimeoutGroup]
2024-12-13T17:22:10,536 DEBUG [master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.small.0-1734110468805 {}] cleaner.HFileCleaner(306): Exit Thread[master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.small.0-1734110468805,5,FailOnTimeoutGroup]
2024-12-13T17:22:10,537 INFO [M:0;bcc0a2319104:33541 {}] hbase.ChoreService(370): Chore service for: master/bcc0a2319104:0 had [] on shutdown
2024-12-13T17:22:10,537 DEBUG [M:0;bcc0a2319104:33541 {}] master.HMaster(1733): Stopping service threads
2024-12-13T17:22:10,537 INFO [M:0;bcc0a2319104:33541 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-13T17:22:10,537 INFO [M:0;bcc0a2319104:33541 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-13T17:22:10,537 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-13T17:22:10,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-13T17:22:10,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:22:10,544 DEBUG [M:0;bcc0a2319104:33541 {}] zookeeper.ZKUtil(347): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-13T17:22:10,544 WARN [M:0;bcc0a2319104:33541 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-13T17:22:10,544 INFO [M:0;bcc0a2319104:33541 {}] assignment.AssignmentManager(391): Stopping assignment manager
2024-12-13T17:22:10,544 INFO [M:0;bcc0a2319104:33541 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-13T17:22:10,544 DEBUG [M:0;bcc0a2319104:33541 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-13T17:22:10,544 INFO [M:0;bcc0a2319104:33541 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-13T17:22:10,544 DEBUG [M:0;bcc0a2319104:33541 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-13T17:22:10,544 DEBUG [M:0;bcc0a2319104:33541 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-13T17:22:10,544 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-13T17:22:10,544 DEBUG [M:0;bcc0a2319104:33541 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-13T17:22:10,544 INFO [M:0;bcc0a2319104:33541 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=65.10 KB heapSize=81.72 KB
2024-12-13T17:22:10,560 DEBUG [M:0;bcc0a2319104:33541 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/77825e296063411185d85a28f267316a is 82, key is hbase:meta,,1/info:regioninfo/1734110469494/Put/seqid=0
2024-12-13T17:22:10,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36517 is added to blk_1073741851_1027 (size=5672)
2024-12-13T17:22:10,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40217 is added to blk_1073741851_1027 (size=5672)
2024-12-13T17:22:10,565 INFO [M:0;bcc0a2319104:33541 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/77825e296063411185d85a28f267316a
2024-12-13T17:22:10,583 DEBUG [M:0;bcc0a2319104:33541 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9a6dc8227c454b2b8b41bda066284660 is 799, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1734110470539/Put/seqid=0
2024-12-13T17:22:10,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36517 is added to blk_1073741852_1028 (size=8358)
2024-12-13T17:22:10,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40217 is added to blk_1073741852_1028 (size=8358)
2024-12-13T17:22:10,588 INFO [M:0;bcc0a2319104:33541 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.49 KB at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9a6dc8227c454b2b8b41bda066284660
2024-12-13T17:22:10,593 INFO [M:0;bcc0a2319104:33541 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9a6dc8227c454b2b8b41bda066284660
2024-12-13T17:22:10,607 DEBUG [M:0;bcc0a2319104:33541 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5fc7f9a826e54bb4bcdedef2090fe86b is 69, key is bcc0a2319104,44309,1734110467950/rs:state/1734110468862/Put/seqid=0
2024-12-13T17:22:10,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40217 is added to blk_1073741853_1029 (size=5156)
2024-12-13T17:22:10,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36517 is added to blk_1073741853_1029 (size=5156)
2024-12-13T17:22:10,613 INFO [M:0;bcc0a2319104:33541 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5fc7f9a826e54bb4bcdedef2090fe86b
2024-12-13T17:22:10,627 INFO [RS:0;bcc0a2319104:44309 {}] regionserver.HRegionServer(1307): Exiting; stopping=bcc0a2319104,44309,1734110467950; zookeeper connection closed.
2024-12-13T17:22:10,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44309-0x1002068f1f20001, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-13T17:22:10,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44309-0x1002068f1f20001, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-13T17:22:10,627 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@bca2719 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@bca2719
2024-12-13T17:22:10,628 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-13T17:22:10,632 DEBUG [M:0;bcc0a2319104:33541 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7e803a69a122428ea23a70623b46d039 is 52, key is load_balancer_on/state:d/1734110470146/Put/seqid=0
2024-12-13T17:22:10,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40217 is added to blk_1073741854_1030 (size=5056)
2024-12-13T17:22:10,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36517 is added to blk_1073741854_1030 (size=5056)
2024-12-13T17:22:10,638 INFO [M:0;bcc0a2319104:33541 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7e803a69a122428ea23a70623b46d039
2024-12-13T17:22:10,643 DEBUG [M:0;bcc0a2319104:33541 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/77825e296063411185d85a28f267316a as hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/77825e296063411185d85a28f267316a
2024-12-13T17:22:10,647 INFO [M:0;bcc0a2319104:33541 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/77825e296063411185d85a28f267316a, entries=8, sequenceid=184, filesize=5.5 K
2024-12-13T17:22:10,648 DEBUG [M:0;bcc0a2319104:33541 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9a6dc8227c454b2b8b41bda066284660 as hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9a6dc8227c454b2b8b41bda066284660
2024-12-13T17:22:10,653 INFO [M:0;bcc0a2319104:33541 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9a6dc8227c454b2b8b41bda066284660
2024-12-13T17:22:10,653 INFO [M:0;bcc0a2319104:33541 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9a6dc8227c454b2b8b41bda066284660, entries=21, sequenceid=184, filesize=8.2 K
2024-12-13T17:22:10,654 DEBUG [M:0;bcc0a2319104:33541 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5fc7f9a826e54bb4bcdedef2090fe86b as hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5fc7f9a826e54bb4bcdedef2090fe86b
2024-12-13T17:22:10,659 INFO [M:0;bcc0a2319104:33541 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5fc7f9a826e54bb4bcdedef2090fe86b, entries=1, sequenceid=184, filesize=5.0 K
2024-12-13T17:22:10,660 DEBUG [M:0;bcc0a2319104:33541 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7e803a69a122428ea23a70623b46d039 as hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7e803a69a122428ea23a70623b46d039
2024-12-13T17:22:10,665 INFO [M:0;bcc0a2319104:33541 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41007/user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7e803a69a122428ea23a70623b46d039, entries=1, sequenceid=184, filesize=4.9 K
2024-12-13T17:22:10,666 INFO [M:0;bcc0a2319104:33541 {}] regionserver.HRegion(3040): Finished flush of dataSize ~65.10 KB/66658, heapSize ~81.66 KB/83616, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 122ms, sequenceid=184, compaction requested=false
2024-12-13T17:22:10,670 INFO [M:0;bcc0a2319104:33541 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-13T17:22:10,670 DEBUG [M:0;bcc0a2319104:33541 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-13T17:22:10,671 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/6a0cdb9b-185a-e33d-8821-1e2248997e16/MasterData/WALs/bcc0a2319104,33541,1734110467458
2024-12-13T17:22:10,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40217 is added to blk_1073741830_1006 (size=79179)
2024-12-13T17:22:10,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36517 is added to blk_1073741830_1006 (size=79179)
2024-12-13T17:22:10,673 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-13T17:22:10,673 INFO [M:0;bcc0a2319104:33541 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-12-13T17:22:10,673 INFO [M:0;bcc0a2319104:33541 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:33541
2024-12-13T17:22:10,677 DEBUG [M:0;bcc0a2319104:33541 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/bcc0a2319104,33541,1734110467458 already deleted, retry=false
2024-12-13T17:22:10,731 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:22:10,771 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:22:10,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-13T17:22:10,785 INFO [M:0;bcc0a2319104:33541 {}] regionserver.HRegionServer(1307): Exiting; stopping=bcc0a2319104,33541,1734110467458; zookeeper connection closed.
2024-12-13T17:22:10,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33541-0x1002068f1f20000, quorum=127.0.0.1:51199, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-13T17:22:10,788 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@554c1d71{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-13T17:22:10,788 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@726e788f{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-13T17:22:10,788 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-13T17:22:10,789 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a11cb82{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-13T17:22:10,789 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7bc0a4a7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/hadoop.log.dir/,STOPPED}
2024-12-13T17:22:10,790 WARN [BP-801480977-172.17.0.2-1734110465550 heartbeating to localhost/127.0.0.1:41007 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-13T17:22:10,790 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-13T17:22:10,790 WARN [BP-801480977-172.17.0.2-1734110465550 heartbeating to localhost/127.0.0.1:41007 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-801480977-172.17.0.2-1734110465550 (Datanode Uuid 80931abb-de8e-4067-9546-08521da12df6) service to localhost/127.0.0.1:41007
2024-12-13T17:22:10,790 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-13T17:22:10,791 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/cluster_b3d8cb9d-656a-e795-527f-cec2f0ae7763/dfs/data/data3/current/BP-801480977-172.17.0.2-1734110465550 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T17:22:10,791 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/cluster_b3d8cb9d-656a-e795-527f-cec2f0ae7763/dfs/data/data4/current/BP-801480977-172.17.0.2-1734110465550 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T17:22:10,791 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-13T17:22:10,799 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3a7e2505{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-13T17:22:10,799 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3dc3f173{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-13T17:22:10,799 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-13T17:22:10,799 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@72f6e6ee{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-13T17:22:10,800 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56d95970{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/hadoop.log.dir/,STOPPED}
2024-12-13T17:22:10,801 WARN [BP-801480977-172.17.0.2-1734110465550 heartbeating to localhost/127.0.0.1:41007 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-13T17:22:10,801 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-13T17:22:10,801 WARN [BP-801480977-172.17.0.2-1734110465550 heartbeating to localhost/127.0.0.1:41007 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-801480977-172.17.0.2-1734110465550 (Datanode Uuid 48441be1-7355-41f3-b362-a1ea369ab017) service to localhost/127.0.0.1:41007
2024-12-13T17:22:10,801 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-13T17:22:10,801 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/cluster_b3d8cb9d-656a-e795-527f-cec2f0ae7763/dfs/data/data1/current/BP-801480977-172.17.0.2-1734110465550 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T17:22:10,802 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/cluster_b3d8cb9d-656a-e795-527f-cec2f0ae7763/dfs/data/data2/current/BP-801480977-172.17.0.2-1734110465550 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T17:22:10,802 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-13T17:22:10,806 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@62f70d3c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-13T17:22:10,807 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@54da9c94{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-13T17:22:10,807 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-13T17:22:10,807 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3334e3c4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-13T17:22:10,807 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@64ec3f26{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/hadoop.log.dir/,STOPPED}
2024-12-13T17:22:10,812 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-12-13T17:22:10,830 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
2024-12-13T17:22:10,835 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=113 (was 99) - Thread LEAK? -, OpenFileDescriptor=464 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=241 (was 117) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=2952 (was 3485)
2024-12-13T17:22:10,841 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=114, OpenFileDescriptor=464, MaxFileDescriptor=1048576, SystemLoadAverage=241, ProcessCount=11, AvailableMemoryMB=2952
2024-12-13T17:22:10,841 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-13T17:22:10,841 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/hadoop.log.dir so I do NOT create it in target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556
2024-12-13T17:22:10,841 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/43cf1249-92d6-8101-a20e-2faf9c3f86f3/hadoop.tmp.dir so I do NOT create it in target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556
2024-12-13T17:22:10,841 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/cluster_97699885-1d95-5cf6-adbf-615e981655a5, deleteOnExit=true
2024-12-13T17:22:10,841 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS
2024-12-13T17:22:10,841 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/test.cache.data in system properties and HBase conf
2024-12-13T17:22:10,841 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/hadoop.tmp.dir in system properties and HBase conf
2024-12-13T17:22:10,841 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/hadoop.log.dir in system properties and HBase conf
2024-12-13T17:22:10,841 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-13T17:22:10,842 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-13T17:22:10,842 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF
2024-12-13T17:22:10,842 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-13T17:22:10,842 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-13T17:22:10,842 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-13T17:22:10,842 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-13T17:22:10,842 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-13T17:22:10,842 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-13T17:22:10,842 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-13T17:22:10,842 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-13T17:22:10,842 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-13T17:22:10,842 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-13T17:22:10,842 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/nfs.dump.dir in system properties and HBase conf
2024-12-13T17:22:10,842 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/java.io.tmpdir in system properties and HBase conf
2024-12-13T17:22:10,842 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-13T17:22:10,843 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-13T17:22:10,843 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-13T17:22:10,854 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-13T17:22:10,888 INFO [regionserver/bcc0a2319104:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-13T17:22:10,943 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:22:11,108 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-13T17:22:11,111 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-13T17:22:11,112 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-13T17:22:11,112 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-13T17:22:11,112 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-13T17:22:11,113 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-13T17:22:11,113 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@83c844a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/hadoop.log.dir/,AVAILABLE}
2024-12-13T17:22:11,113 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4b21935c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-13T17:22:11,203 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@30af8f97{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/java.io.tmpdir/jetty-localhost-37005-hadoop-hdfs-3_4_1-tests_jar-_-any-9254243083952711280/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-13T17:22:11,204 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5ecfeab9{HTTP/1.1, (http/1.1)}{localhost:37005}
2024-12-13T17:22:11,204 INFO [Time-limited test {}] server.Server(415): Started @303931ms
2024-12-13T17:22:11,215 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-13T17:22:11,366 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-13T17:22:11,368 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-13T17:22:11,369 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-13T17:22:11,369 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-13T17:22:11,369 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-13T17:22:11,370 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@562ff434{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/hadoop.log.dir/,AVAILABLE}
2024-12-13T17:22:11,370 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d7b6841{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-13T17:22:11,458 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@47b1c855{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/java.io.tmpdir/jetty-localhost-40689-hadoop-hdfs-3_4_1-tests_jar-_-any-3192710618327594165/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-13T17:22:11,458 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@472b36ec{HTTP/1.1, (http/1.1)}{localhost:40689}
2024-12-13T17:22:11,458 INFO [Time-limited test {}] server.Server(415): Started @304186ms
2024-12-13T17:22:11,459 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-13T17:22:11,482 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-13T17:22:11,486 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-13T17:22:11,487 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-13T17:22:11,487 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-13T17:22:11,487 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-13T17:22:11,488 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@363ce62f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/hadoop.log.dir/,AVAILABLE}
2024-12-13T17:22:11,488 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62dd1763{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-13T17:22:11,577 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5c032588{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/java.io.tmpdir/jetty-localhost-35737-hadoop-hdfs-3_4_1-tests_jar-_-any-6639736521898616401/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-13T17:22:11,577 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@312ed15c{HTTP/1.1, (http/1.1)}{localhost:35737}
2024-12-13T17:22:11,577 INFO [Time-limited test {}] server.Server(415): Started @304305ms
2024-12-13T17:22:11,578 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-13T17:22:11,731 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:22:11,771 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:22:11,944 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:12,093 WARN [Thread-1741 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/cluster_97699885-1d95-5cf6-adbf-615e981655a5/dfs/data/data2/current/BP-1985376407-172.17.0.2-1734110530861/current, will proceed with Du for space computation calculation, 2024-12-13T17:22:12,093 WARN [Thread-1740 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/cluster_97699885-1d95-5cf6-adbf-615e981655a5/dfs/data/data1/current/BP-1985376407-172.17.0.2-1734110530861/current, will proceed with Du for space computation calculation, 2024-12-13T17:22:12,106 WARN [Thread-1704 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-13T17:22:12,109 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x881d254357738f4 with lease ID 0xbf9dc2b7f138b483: Processing first storage report for DS-eec8d2f5-f47e-4783-b7aa-58a359c90e96 from datanode DatanodeRegistration(127.0.0.1:40799, datanodeUuid=df3967f5-01fd-4f13-bd55-d7f27bffe7a3, infoPort=38517, infoSecurePort=0, ipcPort=42827, storageInfo=lv=-57;cid=testClusterID;nsid=1454724685;c=1734110530861) 2024-12-13T17:22:12,109 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x881d254357738f4 with lease ID 0xbf9dc2b7f138b483: from storage DS-eec8d2f5-f47e-4783-b7aa-58a359c90e96 node DatanodeRegistration(127.0.0.1:40799, datanodeUuid=df3967f5-01fd-4f13-bd55-d7f27bffe7a3, infoPort=38517, infoSecurePort=0, ipcPort=42827, storageInfo=lv=-57;cid=testClusterID;nsid=1454724685;c=1734110530861), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-13T17:22:12,109 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x881d254357738f4 with lease ID 0xbf9dc2b7f138b483: Processing first storage report for DS-85cac17c-1eeb-4f49-8a42-7d6edec52b14 from datanode DatanodeRegistration(127.0.0.1:40799, datanodeUuid=df3967f5-01fd-4f13-bd55-d7f27bffe7a3, infoPort=38517, infoSecurePort=0, ipcPort=42827, storageInfo=lv=-57;cid=testClusterID;nsid=1454724685;c=1734110530861) 2024-12-13T17:22:12,109 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x881d254357738f4 with lease ID 0xbf9dc2b7f138b483: from storage DS-85cac17c-1eeb-4f49-8a42-7d6edec52b14 node DatanodeRegistration(127.0.0.1:40799, datanodeUuid=df3967f5-01fd-4f13-bd55-d7f27bffe7a3, infoPort=38517, infoSecurePort=0, ipcPort=42827, storageInfo=lv=-57;cid=testClusterID;nsid=1454724685;c=1734110530861), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-13T17:22:12,282 WARN [Thread-1752 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/cluster_97699885-1d95-5cf6-adbf-615e981655a5/dfs/data/data3/current/BP-1985376407-172.17.0.2-1734110530861/current, will proceed with Du for space computation calculation, 2024-12-13T17:22:12,282 WARN [Thread-1753 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/cluster_97699885-1d95-5cf6-adbf-615e981655a5/dfs/data/data4/current/BP-1985376407-172.17.0.2-1734110530861/current, will proceed with Du for space computation calculation, 2024-12-13T17:22:12,299 WARN [Thread-1727 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-13T17:22:12,301 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x21ea22d01a88a41d with lease ID 0xbf9dc2b7f138b484: Processing first storage report for DS-21b56327-1d70-40b7-b88e-7588f7579637 from datanode DatanodeRegistration(127.0.0.1:44197, datanodeUuid=4f614c03-80b7-4739-8b89-5c9b71ab2255, infoPort=37337, infoSecurePort=0, ipcPort=40719, storageInfo=lv=-57;cid=testClusterID;nsid=1454724685;c=1734110530861) 2024-12-13T17:22:12,301 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x21ea22d01a88a41d with lease ID 0xbf9dc2b7f138b484: from storage DS-21b56327-1d70-40b7-b88e-7588f7579637 node DatanodeRegistration(127.0.0.1:44197, datanodeUuid=4f614c03-80b7-4739-8b89-5c9b71ab2255, infoPort=37337, infoSecurePort=0, ipcPort=40719, storageInfo=lv=-57;cid=testClusterID;nsid=1454724685;c=1734110530861), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-13T17:22:12,301 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x21ea22d01a88a41d with lease ID 0xbf9dc2b7f138b484: Processing first storage report for DS-566e0919-7422-42d5-94ec-0cea4b060684 from datanode DatanodeRegistration(127.0.0.1:44197, datanodeUuid=4f614c03-80b7-4739-8b89-5c9b71ab2255, infoPort=37337, infoSecurePort=0, ipcPort=40719, storageInfo=lv=-57;cid=testClusterID;nsid=1454724685;c=1734110530861) 2024-12-13T17:22:12,301 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x21ea22d01a88a41d with lease ID 0xbf9dc2b7f138b484: from storage DS-566e0919-7422-42d5-94ec-0cea4b060684 node DatanodeRegistration(127.0.0.1:44197, datanodeUuid=4f614c03-80b7-4739-8b89-5c9b71ab2255, infoPort=37337, infoSecurePort=0, ipcPort=40719, storageInfo=lv=-57;cid=testClusterID;nsid=1454724685;c=1734110530861), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-13T17:22:12,311 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556 2024-12-13T17:22:12,313 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/cluster_97699885-1d95-5cf6-adbf-615e981655a5/zookeeper_0, clientPort=51612, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/cluster_97699885-1d95-5cf6-adbf-615e981655a5/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/cluster_97699885-1d95-5cf6-adbf-615e981655a5/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-13T17:22:12,314 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=51612 2024-12-13T17:22:12,314 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
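The repeated "Failed invocation ... InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed" warnings above come from RecoverLeaseFSUtils calling isFileClosed through reflection (visible in the trace frames), so the HDFS client's IOException surfaces wrapped in InvocationTargetException. Below is a minimal Java sketch of that reflection pattern, assuming simplified names and error handling; it is illustrative, not HBase's actual implementation.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch only: look up isFileClosed reflectively (so the code also loads
// against Hadoop versions that lack the method) and unwrap the real cause
// when Method.invoke wraps it in InvocationTargetException.
final class IsFileClosedProbe {
  private final Method isFileClosed; // null when the running Hadoop lacks it

  IsFileClosedProbe(FileSystem fs) {
    Method m = null;
    try {
      m = fs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      // old Hadoop: caller must rely on recoverLease() polling alone
    }
    isFileClosed = m;
  }

  boolean probe(FileSystem fs, Path p) throws IOException {
    if (isFileClosed == null) {
      return false;
    }
    try {
      return (Boolean) isFileClosed.invoke(fs, p);
    } catch (InvocationTargetException e) {
      // a closed DFSClient shows up here as "IOException: Filesystem closed"
      Throwable cause = e.getCause();
      throw (cause instanceof IOException) ? (IOException) cause : new IOException(cause);
    } catch (IllegalAccessException e) {
      throw new IOException(e);
    }
  }
}

In this test run the warnings are benign noise: the mini cluster's filesystem was already shut down while the Close-WAL-Writer thread was still probing old WAL files.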
2024-12-13T17:22:12,316 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-13T17:22:12,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741825_1001 (size=7)
2024-12-13T17:22:12,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741825_1001 (size=7)
2024-12-13T17:22:12,327 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426 with version=8
2024-12-13T17:22:12,327 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/hbase-staging
2024-12-13T17:22:12,328 INFO [Time-limited test {}] client.ConnectionUtils(129): master/bcc0a2319104:0 server-side Connection retries=45
2024-12-13T17:22:12,329 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-13T17:22:12,329 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-13T17:22:12,329 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-13T17:22:12,329 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-13T17:22:12,329 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-13T17:22:12,329 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-13T17:22:12,329 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-13T17:22:12,329 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:35795
2024-12-13T17:22:12,330 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-13T17:22:12,331 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-13T17:22:12,333 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:35795 connecting to ZooKeeper ensemble=127.0.0.1:51612
2024-12-13T17:22:12,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:357950x0, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-13T17:22:12,387 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35795-0x1002069ef590000 connected
2024-12-13T17:22:12,450 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-13T17:22:12,450 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-13T17:22:12,451 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-13T17:22:12,451 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35795
2024-12-13T17:22:12,452 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35795
2024-12-13T17:22:12,452 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35795
2024-12-13T17:22:12,452 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35795
2024-12-13T17:22:12,452 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35795
2024-12-13T17:22:12,453 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426, hbase.cluster.distributed=false
2024-12-13T17:22:12,467 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/bcc0a2319104:0 server-side Connection retries=45
2024-12-13T17:22:12,468 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-13T17:22:12,468 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-13T17:22:12,468 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-13T17:22:12,468 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-13T17:22:12,468 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-13T17:22:12,468 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-13T17:22:12,468 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
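The ZKUtil lines above ("Set watcher on znode that does not yet exist, /hbase/master") rely on the ZooKeeper property that exists() registers a watch even when the node is absent, so the caller is later notified via NodeCreated. Here is a small sketch of that pattern using the plain ZooKeeper client API rather than HBase's ZKUtil; the class and method names are illustrative.

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;

// Sketch: arm a watch on a possibly-nonexistent znode. exists() returns null
// when the node is absent, but the watch is still registered and will fire
// once with NodeCreated / NodeDeleted / NodeDataChanged.
final class MasterZNodeWatch {
  static void watchMaster(ZooKeeper zk, String baseZNode)
      throws KeeperException, InterruptedException {
    String path = baseZNode + "/master"; // e.g. /hbase/master, as in the log
    if (zk.exists(path, (WatchedEvent event) ->
        System.out.println("event " + event.getType() + " on " + event.getPath())) == null) {
      System.out.println("znode " + path + " does not yet exist; watch armed");
    }
  }
}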
2024-12-13T17:22:12,468 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:35103
2024-12-13T17:22:12,469 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-13T17:22:12,469 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-13T17:22:12,470 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-13T17:22:12,471 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-13T17:22:12,473 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:35103 connecting to ZooKeeper ensemble=127.0.0.1:51612
2024-12-13T17:22:12,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:351030x0, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-13T17:22:12,483 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35103-0x1002069ef590001 connected
2024-12-13T17:22:12,483 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35103-0x1002069ef590001, quorum=127.0.0.1:51612, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-13T17:22:12,484 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35103-0x1002069ef590001, quorum=127.0.0.1:51612, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-13T17:22:12,484 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35103-0x1002069ef590001, quorum=127.0.0.1:51612, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-13T17:22:12,485 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35103
2024-12-13T17:22:12,485 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35103
2024-12-13T17:22:12,485 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35103
2024-12-13T17:22:12,486 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35103
2024-12-13T17:22:12,486 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35103
2024-12-13T17:22:12,487 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/bcc0a2319104,35795,1734110532328
2024-12-13T17:22:12,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35103-0x1002069ef590001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-13T17:22:12,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-13T17:22:12,491 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/bcc0a2319104,35795,1734110532328
2024-12-13T17:22:12,499 DEBUG [M:0;bcc0a2319104:35795 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;bcc0a2319104:35795
2024-12-13T17:22:12,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-13T17:22:12,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35103-0x1002069ef590001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-13T17:22:12,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35103-0x1002069ef590001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:22:12,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:22:12,500 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-13T17:22:12,500 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/bcc0a2319104,35795,1734110532328 from backup master directory
2024-12-13T17:22:12,508 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/bcc0a2319104,35795,1734110532328
2024-12-13T17:22:12,508 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35103-0x1002069ef590001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-13T17:22:12,508 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-13T17:22:12,508 WARN [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
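The sequence above (backup-masters znode added, /hbase/master NodeCreated, backup-masters entry deleted) is the active-master election. The sketch below captures the shape of that handshake with the raw ZooKeeper API, under the assumption of heavily simplified logic: real HBase (ActiveMasterManager) also handles session expiry, stale znodes, and watching the active master; names here are illustrative only.

import java.nio.charset.StandardCharsets;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

// Sketch: register under /hbase/backup-masters, then race to create the
// ephemeral /hbase/master znode; the winner becomes active and removes its
// backup entry, mirroring the log's "Deleting ZNode ... from backup master
// directory" followed by "Registered as active master".
final class ActiveMasterElectionSketch {
  static boolean tryBecomeActive(ZooKeeper zk, String serverName)
      throws KeeperException, InterruptedException {
    byte[] data = serverName.getBytes(StandardCharsets.UTF_8);
    String backup = "/hbase/backup-masters/" + serverName;
    zk.create(backup, data, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    try {
      // ephemeral: the znode vanishes if this master's ZK session dies
      zk.create("/hbase/master", data, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
      zk.delete(backup, -1); // won the race: leave the backup directory
      return true;
    } catch (KeeperException.NodeExistsException e) {
      return false; // another master is active; remain a backup
    }
  }
}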
2024-12-13T17:22:12,508 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-13T17:22:12,508 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=bcc0a2319104,35795,1734110532328
2024-12-13T17:22:12,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741826_1002 (size=42)
2024-12-13T17:22:12,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741826_1002 (size=42)
2024-12-13T17:22:12,520 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/hbase.id with ID: a973ffb5-ca73-48cf-86a5-24a2095688fd
2024-12-13T17:22:12,529 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-13T17:22:12,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35103-0x1002069ef590001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:22:12,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:22:12,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741827_1003 (size=196)
2024-12-13T17:22:12,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741827_1003 (size=196)
2024-12-13T17:22:12,550 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-13T17:22:12,551 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-13T17:22:12,551 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-13T17:22:12,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741828_1004 (size=1189)
2024-12-13T17:22:12,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741828_1004 (size=1189)
2024-12-13T17:22:12,562 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/MasterData/data/master/store
2024-12-13T17:22:12,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741829_1005 (size=34)
2024-12-13T17:22:12,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741829_1005 (size=34)
2024-12-13T17:22:12,571 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:22:12,571 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-13T17:22:12,571 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
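The 'master:store' descriptor logged above (four families: info, proc, rs, state, with info in-memory, ROW_INDEX_V1-encoded, and on an 8 KB block size) maps directly onto the HBase 2.x builder API. The sketch below assembles an equivalent descriptor; it is a hedged illustration using only standard public builder calls, not the code HBase itself runs to create the master local region.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: rebuild the logged 'master:store' table descriptor. Values mirror
// the log line; unlisted attributes keep their defaults.
final class MasterStoreDescriptorSketch {
  static TableDescriptor build() {
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                // VERSIONS => '3'
            .setInMemory(true)                                // IN_MEMORY => 'true'
            .setBloomFilterType(BloomType.ROWCOL)             // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8 * 1024)                           // BLOCKSIZE => '8192 B (8KB)'
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
        .build();
  }
}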
2024-12-13T17:22:12,571 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-13T17:22:12,571 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-13T17:22:12,571 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-13T17:22:12,571 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-13T17:22:12,571 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-13T17:22:12,572 WARN [master/bcc0a2319104:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/MasterData/data/master/store/.initializing
2024-12-13T17:22:12,572 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/MasterData/WALs/bcc0a2319104,35795,1734110532328
2024-12-13T17:22:12,576 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bcc0a2319104%2C35795%2C1734110532328, suffix=, logDir=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/MasterData/WALs/bcc0a2319104,35795,1734110532328, archiveDir=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/MasterData/oldWALs, maxLogs=10
2024-12-13T17:22:12,577 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C35795%2C1734110532328.1734110532577
2024-12-13T17:22:12,585 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/MasterData/WALs/bcc0a2319104,35795,1734110532328/bcc0a2319104%2C35795%2C1734110532328.1734110532577
2024-12-13T17:22:12,586 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37337:37337),(127.0.0.1/127.0.0.1:38517:38517)]
2024-12-13T17:22:12,586 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-12-13T17:22:12,586 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:22:12,586 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-12-13T17:22:12,586 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-12-13T17:22:12,588 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-12-13T17:22:12,589 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-12-13T17:22:12,589 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:22:12,590 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-13T17:22:12,590 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-12-13T17:22:12,591 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-12-13T17:22:12,591 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:22:12,592 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-13T17:22:12,592 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-12-13T17:22:12,593 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-12-13T17:22:12,593 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:22:12,593 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-13T17:22:12,594 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-12-13T17:22:12,595 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-12-13T17:22:12,595 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:22:12,595 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-13T17:22:12,596 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-13T17:22:12,596 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-13T17:22:12,598 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor; using region.getMemStoreFlushHeapSize/# of families (32.0 M) instead.
2024-12-13T17:22:12,600 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-12-13T17:22:12,601 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-13T17:22:12,602 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicy super{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=707071, jitterRate=-0.10091298818588257}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-12-13T17:22:12,602 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-13T17:22:12,602 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-12-13T17:22:12,605 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a49e29a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-13T17:22:12,606 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating...
2024-12-13T17:22:12,606 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-12-13T17:22:12,606 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-12-13T17:22:12,606 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-12-13T17:22:12,607 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec
2024-12-13T17:22:12,607 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec
2024-12-13T17:22:12,607 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-12-13T17:22:12,608 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
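The FlushLargeStoresPolicy line above reports the fallback it applied: with no explicit hbase.hregion.percolumnfamilyflush.size.lower.bound, the per-family flush bound is the region flush size divided by the family count, which for the logged 128 MB flush size and the four families of master:store gives the 32 MB (33554432) also visible in FlushLargeStoresPolicy{flushSizeLowerBound=33554432}. The snippet below just makes that arithmetic concrete; the method and class names are illustrative, not HBase's.

// Sketch: the per-family flush lower bound fallback described in the log.
final class PerFamilyFlushBoundSketch {
  static long lowerBound(long memstoreFlushSize, int familyCount, Long configured) {
    if (configured != null) {
      return configured; // explicit hbase.hregion.percolumnfamilyflush.size.lower.bound
    }
    return memstoreFlushSize / familyCount; // 134217728 / 4 = 33554432 (32 MB)
  }

  public static void main(String[] args) {
    System.out.println(lowerBound(134217728L, 4, null)); // prints 33554432
  }
}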
2024-12-13T17:22:12,609 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-12-13T17:22:12,616 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false
2024-12-13T17:22:12,616 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-12-13T17:22:12,617 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-12-13T17:22:12,624 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false
2024-12-13T17:22:12,624 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-12-13T17:22:12,625 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-12-13T17:22:12,632 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false
2024-12-13T17:22:12,633 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-12-13T17:22:12,641 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false
2024-12-13T17:22:12,642 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-12-13T17:22:12,649 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-12-13T17:22:12,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-13T17:22:12,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35103-0x1002069ef590001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-13T17:22:12,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:22:12,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35103-0x1002069ef590001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:22:12,658 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=bcc0a2319104,35795,1734110532328, sessionid=0x1002069ef590000, setting cluster-up flag (Was=false)
2024-12-13T17:22:12,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:22:12,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35103-0x1002069ef590001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:22:12,699 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-12-13T17:22:12,702 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bcc0a2319104,35795,1734110532328
2024-12-13T17:22:12,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:22:12,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35103-0x1002069ef590001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:22:12,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:22:12,741 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-12-13T17:22:12,742 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bcc0a2319104,35795,1734110532328
2024-12-13T17:22:12,745 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta
2024-12-13T17:22:12,745 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2
2024-12-13T17:22:12,745 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction], sum of multiplier of cost functions = 0.0 etc.
2024-12-13T17:22:12,745 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: bcc0a2319104,35795,1734110532328 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
2024-12-13T17:22:12,745 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/bcc0a2319104:0, corePoolSize=5, maxPoolSize=5
2024-12-13T17:22:12,745 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/bcc0a2319104:0, corePoolSize=5, maxPoolSize=5
2024-12-13T17:22:12,745 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/bcc0a2319104:0, corePoolSize=5, maxPoolSize=5
2024-12-13T17:22:12,745 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/bcc0a2319104:0, corePoolSize=5, maxPoolSize=5
2024-12-13T17:22:12,745 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/bcc0a2319104:0, corePoolSize=10, maxPoolSize=10
2024-12-13T17:22:12,745 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:22:12,745 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/bcc0a2319104:0, corePoolSize=2, maxPoolSize=2
2024-12-13T17:22:12,746 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:22:12,746 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1734110562746
2024-12-13T17:22:12,746 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-12-13T17:22:12,746 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-12-13T17:22:12,747 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-12-13T17:22:12,747 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-12-13T17:22:12,747 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-12-13T17:22:12,747 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-12-13T17:22:12,747 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta
2024-12-13T17:22:12,747 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region
2024-12-13T17:22:12,747 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-13T17:22:12,747 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-12-13T17:22:12,747 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-12-13T17:22:12,747 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-12-13T17:22:12,748 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-12-13T17:22:12,748 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-12-13T17:22:12,748 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:22:12,748 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.large.0-1734110532748,5,FailOnTimeoutGroup]
2024-12-13T17:22:12,748 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.small.0-1734110532748,5,FailOnTimeoutGroup]
2024-12-13T17:22:12,748 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-13T17:22:12,748 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-12-13T17:22:12,748 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-13T17:22:12,748 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-12-13T17:22:12,748 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
2024-12-13T17:22:12,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741831_1007 (size=1039)
2024-12-13T17:22:12,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741831_1007 (size=1039)
2024-12-13T17:22:12,756 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039
2024-12-13T17:22:12,756 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426
2024-12-13T17:22:12,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741832_1008 (size=32)
2024-12-13T17:22:12,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741832_1008 (size=32)
2024-12-13T17:22:12,763 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:22:12,764 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-13T17:22:12,765 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-13T17:22:12,765 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:22:12,766 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-13T17:22:12,766 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-13T17:22:12,767 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-13T17:22:12,767 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:22:12,768 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-13T17:22:12,768 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-13T17:22:12,769 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-13T17:22:12,769 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:22:12,770 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-13T17:22:12,770 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/hbase/meta/1588230740
2024-12-13T17:22:12,770 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/hbase/meta/1588230740
2024-12-13T17:22:12,772 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-13T17:22:12,772 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:22:12,773 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-13T17:22:12,775 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-13T17:22:12,776 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=832538, jitterRate=0.058627933263778687}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-13T17:22:12,776 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-12-13T17:22:12,776 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-13T17:22:12,776 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-13T17:22:12,776 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-13T17:22:12,776 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-13T17:22:12,776 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-13T17:22:12,776 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-13T17:22:12,776 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-13T17:22:12,777 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta
2024-12-13T17:22:12,777 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta
2024-12-13T17:22:12,777 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-12-13T17:22:12,778 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-13T17:22:12,779 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-12-13T17:22:12,799 DEBUG [RS:0;bcc0a2319104:35103 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;bcc0a2319104:35103
2024-12-13T17:22:12,800 INFO [RS:0;bcc0a2319104:35103 {}] regionserver.HRegionServer(1008): ClusterId : a973ffb5-ca73-48cf-86a5-24a2095688fd
2024-12-13T17:22:12,800 DEBUG [RS:0;bcc0a2319104:35103 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-12-13T17:22:12,808 DEBUG [RS:0;bcc0a2319104:35103 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-12-13T17:22:12,808 DEBUG [RS:0;bcc0a2319104:35103 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-12-13T17:22:12,816 DEBUG [RS:0;bcc0a2319104:35103 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-12-13T17:22:12,817 DEBUG [RS:0;bcc0a2319104:35103 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64da76b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-13T17:22:12,817 DEBUG [RS:0;bcc0a2319104:35103 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50228e35, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bcc0a2319104/172.17.0.2:0
2024-12-13T17:22:12,817 INFO [RS:0;bcc0a2319104:35103 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled
2024-12-13T17:22:12,817 INFO [RS:0;bcc0a2319104:35103 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled
2024-12-13T17:22:12,817 DEBUG [RS:0;bcc0a2319104:35103 {}] regionserver.HRegionServer(1090): About to register with Master.
2024-12-13T17:22:12,817 INFO [RS:0;bcc0a2319104:35103 {}] regionserver.HRegionServer(3073): reportForDuty to master=bcc0a2319104,35795,1734110532328 with isa=bcc0a2319104/172.17.0.2:35103, startcode=1734110532467
2024-12-13T17:22:12,817 DEBUG [RS:0;bcc0a2319104:35103 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-13T17:22:12,819 INFO [RS-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55241, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService
2024-12-13T17:22:12,819 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35795 {}] master.ServerManager(332): Checking decommissioned status of RegionServer bcc0a2319104,35103,1734110532467
2024-12-13T17:22:12,820 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35795 {}] master.ServerManager(486): Registering regionserver=bcc0a2319104,35103,1734110532467
2024-12-13T17:22:12,821 DEBUG [RS:0;bcc0a2319104:35103 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426
2024-12-13T17:22:12,821 DEBUG [RS:0;bcc0a2319104:35103 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:41263
2024-12-13T17:22:12,821 DEBUG [RS:0;bcc0a2319104:35103 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1
2024-12-13T17:22:12,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-13T17:22:12,832 DEBUG [RS:0;bcc0a2319104:35103 {}] zookeeper.ZKUtil(111): regionserver:35103-0x1002069ef590001, quorum=127.0.0.1:51612, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/bcc0a2319104,35103,1734110532467
2024-12-13T17:22:12,832 WARN [RS:0;bcc0a2319104:35103 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-13T17:22:12,832 INFO [RS:0;bcc0a2319104:35103 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-13T17:22:12,832 DEBUG [RS:0;bcc0a2319104:35103 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/WALs/bcc0a2319104,35103,1734110532467
2024-12-13T17:22:12,832 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [bcc0a2319104,35103,1734110532467]
2024-12-13T17:22:12,836 DEBUG [RS:0;bcc0a2319104:35103 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds
2024-12-13T17:22:12,837 INFO [RS:0;bcc0a2319104:35103 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-12-13T17:22:12,839 INFO [RS:0;bcc0a2319104:35103 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-12-13T17:22:12,839 INFO [RS:0;bcc0a2319104:35103 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-13T17:22:12,839 INFO [RS:0;bcc0a2319104:35103 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-13T17:22:12,839 INFO [RS:0;bcc0a2319104:35103 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S
2024-12-13T17:22:12,841 INFO [RS:0;bcc0a2319104:35103 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-12-13T17:22:12,841 DEBUG [RS:0;bcc0a2319104:35103 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:22:12,841 DEBUG [RS:0;bcc0a2319104:35103 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:22:12,841 DEBUG [RS:0;bcc0a2319104:35103 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:22:12,841 DEBUG [RS:0;bcc0a2319104:35103 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:22:12,841 DEBUG [RS:0;bcc0a2319104:35103 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:22:12,841 DEBUG [RS:0;bcc0a2319104:35103 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/bcc0a2319104:0, corePoolSize=2, maxPoolSize=2
2024-12-13T17:22:12,841 DEBUG [RS:0;bcc0a2319104:35103 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:22:12,841 DEBUG [RS:0;bcc0a2319104:35103 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:22:12,841 DEBUG [RS:0;bcc0a2319104:35103 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:22:12,841 DEBUG [RS:0;bcc0a2319104:35103 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:22:12,841 DEBUG [RS:0;bcc0a2319104:35103 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:22:12,841 DEBUG [RS:0;bcc0a2319104:35103 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/bcc0a2319104:0, corePoolSize=3, maxPoolSize=3
2024-12-13T17:22:12,842 DEBUG [RS:0;bcc0a2319104:35103 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0, corePoolSize=3, maxPoolSize=3
2024-12-13T17:22:12,842 INFO [RS:0;bcc0a2319104:35103 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-12-13T17:22:12,842 INFO [RS:0;bcc0a2319104:35103 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-12-13T17:22:12,842 INFO [RS:0;bcc0a2319104:35103 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-12-13T17:22:12,842 INFO [RS:0;bcc0a2319104:35103 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-12-13T17:22:12,843 INFO [RS:0;bcc0a2319104:35103 {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,35103,1734110532467-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-13T17:22:12,856 INFO [RS:0;bcc0a2319104:35103 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-12-13T17:22:12,856 INFO [RS:0;bcc0a2319104:35103 {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,35103,1734110532467-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-13T17:22:12,867 INFO [RS:0;bcc0a2319104:35103 {}] regionserver.Replication(204): bcc0a2319104,35103,1734110532467 started
2024-12-13T17:22:12,867 INFO [RS:0;bcc0a2319104:35103 {}] regionserver.HRegionServer(1767): Serving as bcc0a2319104,35103,1734110532467, RpcServer on bcc0a2319104/172.17.0.2:35103, sessionid=0x1002069ef590001
2024-12-13T17:22:12,868 DEBUG [RS:0;bcc0a2319104:35103 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-12-13T17:22:12,868 DEBUG [RS:0;bcc0a2319104:35103 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager bcc0a2319104,35103,1734110532467
2024-12-13T17:22:12,868 DEBUG [RS:0;bcc0a2319104:35103 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bcc0a2319104,35103,1734110532467'
2024-12-13T17:22:12,868 DEBUG [RS:0;bcc0a2319104:35103 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-12-13T17:22:12,868 DEBUG [RS:0;bcc0a2319104:35103 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-12-13T17:22:12,869 DEBUG [RS:0;bcc0a2319104:35103 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-12-13T17:22:12,869 DEBUG [RS:0;bcc0a2319104:35103 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-12-13T17:22:12,869 DEBUG [RS:0;bcc0a2319104:35103 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager bcc0a2319104,35103,1734110532467
2024-12-13T17:22:12,869 DEBUG [RS:0;bcc0a2319104:35103 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bcc0a2319104,35103,1734110532467'
2024-12-13T17:22:12,869 DEBUG [RS:0;bcc0a2319104:35103 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-12-13T17:22:12,869 DEBUG [RS:0;bcc0a2319104:35103 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-13T17:22:12,870 DEBUG [RS:0;bcc0a2319104:35103 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-13T17:22:12,870 INFO [RS:0;bcc0a2319104:35103 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-13T17:22:12,870 INFO [RS:0;bcc0a2319104:35103 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-12-13T17:22:12,929 WARN [bcc0a2319104:35795 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions.
2024-12-13T17:22:12,944 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:22:12,974 INFO [RS:0;bcc0a2319104:35103 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bcc0a2319104%2C35103%2C1734110532467, suffix=, logDir=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/WALs/bcc0a2319104,35103,1734110532467, archiveDir=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/oldWALs, maxLogs=32
2024-12-13T17:22:12,975 INFO [RS:0;bcc0a2319104:35103 {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C35103%2C1734110532467.1734110532975
2024-12-13T17:22:12,983 INFO [RS:0;bcc0a2319104:35103 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/WALs/bcc0a2319104,35103,1734110532467/bcc0a2319104%2C35103%2C1734110532467.1734110532975
2024-12-13T17:22:12,983 DEBUG [RS:0;bcc0a2319104:35103 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38517:38517),(127.0.0.1/127.0.0.1:37337:37337)]
2024-12-13T17:22:13,180 DEBUG [bcc0a2319104:35795 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-12-13T17:22:13,180 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=bcc0a2319104,35103,1734110532467
2024-12-13T17:22:13,182 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bcc0a2319104,35103,1734110532467, state=OPENING
2024-12-13T17:22:13,191 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-12-13T17:22:13,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:22:13,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35103-0x1002069ef590001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:22:13,200 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=bcc0a2319104,35103,1734110532467}]
2024-12-13T17:22:13,200 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-13T17:22:13,200 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-13T17:22:13,352 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bcc0a2319104,35103,1734110532467
2024-12-13T17:22:13,352 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-13T17:22:13,353 INFO [RS-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37226, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-13T17:22:13,357 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740
2024-12-13T17:22:13,357 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-13T17:22:13,359 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bcc0a2319104%2C35103%2C1734110532467.meta, suffix=.meta, logDir=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/WALs/bcc0a2319104,35103,1734110532467, archiveDir=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/oldWALs, maxLogs=32
2024-12-13T17:22:13,359 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C35103%2C1734110532467.meta.1734110533359.meta
2024-12-13T17:22:13,364 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/WALs/bcc0a2319104,35103,1734110532467/bcc0a2319104%2C35103%2C1734110532467.meta.1734110533359.meta
2024-12-13T17:22:13,364 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38517:38517),(127.0.0.1/127.0.0.1:37337:37337)]
2024-12-13T17:22:13,364 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-12-13T17:22:13,364 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-12-13T17:22:13,364 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-12-13T17:22:13,364 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
2024-12-13T17:22:13,365 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-12-13T17:22:13,365 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:22:13,365 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740
2024-12-13T17:22:13,365 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740
2024-12-13T17:22:13,366 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-13T17:22:13,367 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-13T17:22:13,367 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:22:13,367 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-13T17:22:13,367 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-13T17:22:13,368 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-13T17:22:13,368 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:22:13,368 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-13T17:22:13,369 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-13T17:22:13,369 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-13T17:22:13,369 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:22:13,370 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-13T17:22:13,370 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/hbase/meta/1588230740
2024-12-13T17:22:13,371 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/hbase/meta/1588230740
2024-12-13T17:22:13,373 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-13T17:22:13,374 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-13T17:22:13,374 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=822138, jitterRate=0.045402660965919495}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-13T17:22:13,374 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-12-13T17:22:13,375 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1734110533352
2024-12-13T17:22:13,377 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740
2024-12-13T17:22:13,377 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740
2024-12-13T17:22:13,377 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=bcc0a2319104,35103,1734110532467
2024-12-13T17:22:13,378 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bcc0a2319104,35103,1734110532467, state=OPEN
2024-12-13T17:22:13,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-13T17:22:13,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35103-0x1002069ef590001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-13T17:22:13,442 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-13T17:22:13,442 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-13T17:22:13,444 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2
2024-12-13T17:22:13,444 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=bcc0a2319104,35103,1734110532467 in 242 msec
2024-12-13T17:22:13,445 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1
2024-12-13T17:22:13,446 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 667 msec
2024-12-13T17:22:13,447 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 702 msec
2024-12-13T17:22:13,447 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1734110533447, completionTime=-1
2024-12-13T17:22:13,447 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running
2024-12-13T17:22:13,447 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster...
2024-12-13T17:22:13,448 DEBUG [hconnection-0x34394c48-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-13T17:22:13,449 INFO [RS-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37240, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-13T17:22:13,450 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1
2024-12-13T17:22:13,450 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1734110593450
2024-12-13T17:22:13,450 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1734110653450
2024-12-13T17:22:13,450 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 2 msec
2024-12-13T17:22:13,475 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,35795,1734110532328-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-13T17:22:13,475 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,35795,1734110532328-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-13T17:22:13,475 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,35795,1734110532328-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-13T17:22:13,475 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-bcc0a2319104:35795, period=300000, unit=MILLISECONDS is enabled.
2024-12-13T17:22:13,475 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-12-13T17:22:13,475 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating...
2024-12-13T17:22:13,476 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-13T17:22:13,477 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace
2024-12-13T17:22:13,477 DEBUG [master/bcc0a2319104:0.Chore.1 {}] janitor.CatalogJanitor(179):
2024-12-13T17:22:13,478 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION
2024-12-13T17:22:13,478 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:22:13,479 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-13T17:22:13,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741835_1011 (size=358)
2024-12-13T17:22:13,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741835_1011 (size=358)
2024-12-13T17:22:13,489 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 704a36f7d1b3ea16a3462969bad1e4d2, NAME => 'hbase:namespace,,1734110533475.704a36f7d1b3ea16a3462969bad1e4d2.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426
2024-12-13T17:22:13,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741836_1012 (size=42)
2024-12-13T17:22:13,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741836_1012 (size=42)
2024-12-13T17:22:13,498 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734110533475.704a36f7d1b3ea16a3462969bad1e4d2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:22:13,498 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 704a36f7d1b3ea16a3462969bad1e4d2, disabling compactions & flushes
2024-12-13T17:22:13,498 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734110533475.704a36f7d1b3ea16a3462969bad1e4d2.
2024-12-13T17:22:13,498 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734110533475.704a36f7d1b3ea16a3462969bad1e4d2.
2024-12-13T17:22:13,498 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734110533475.704a36f7d1b3ea16a3462969bad1e4d2. after waiting 0 ms
2024-12-13T17:22:13,498 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734110533475.704a36f7d1b3ea16a3462969bad1e4d2.
2024-12-13T17:22:13,498 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1734110533475.704a36f7d1b3ea16a3462969bad1e4d2.
2024-12-13T17:22:13,498 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 704a36f7d1b3ea16a3462969bad1e4d2:
2024-12-13T17:22:13,499 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META
2024-12-13T17:22:13,500 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1734110533475.704a36f7d1b3ea16a3462969bad1e4d2.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1734110533499"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734110533499"}]},"ts":"1734110533499"}
2024-12-13T17:22:13,502 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta.
2024-12-13T17:22:13,502 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-13T17:22:13,503 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734110533503"}]},"ts":"1734110533503"}
2024-12-13T17:22:13,504 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta
2024-12-13T17:22:13,524 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=704a36f7d1b3ea16a3462969bad1e4d2, ASSIGN}]
2024-12-13T17:22:13,525 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=704a36f7d1b3ea16a3462969bad1e4d2, ASSIGN
2024-12-13T17:22:13,526 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=704a36f7d1b3ea16a3462969bad1e4d2, ASSIGN; state=OFFLINE, location=bcc0a2319104,35103,1734110532467; forceNewPlan=false, retain=false
2024-12-13T17:22:13,677 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=704a36f7d1b3ea16a3462969bad1e4d2, regionState=OPENING, regionLocation=bcc0a2319104,35103,1734110532467
2024-12-13T17:22:13,680 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 704a36f7d1b3ea16a3462969bad1e4d2, server=bcc0a2319104,35103,1734110532467}]
2024-12-13T17:22:13,733 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:22:13,772 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:22:13,834 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bcc0a2319104,35103,1734110532467
2024-12-13T17:22:13,841 INFO [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1734110533475.704a36f7d1b3ea16a3462969bad1e4d2.
2024-12-13T17:22:13,842 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 704a36f7d1b3ea16a3462969bad1e4d2, NAME => 'hbase:namespace,,1734110533475.704a36f7d1b3ea16a3462969bad1e4d2.', STARTKEY => '', ENDKEY => ''}
2024-12-13T17:22:13,842 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 704a36f7d1b3ea16a3462969bad1e4d2
2024-12-13T17:22:13,842 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734110533475.704a36f7d1b3ea16a3462969bad1e4d2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:22:13,843 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 704a36f7d1b3ea16a3462969bad1e4d2
2024-12-13T17:22:13,843 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 704a36f7d1b3ea16a3462969bad1e4d2
2024-12-13T17:22:13,845 INFO [StoreOpener-704a36f7d1b3ea16a3462969bad1e4d2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 704a36f7d1b3ea16a3462969bad1e4d2
2024-12-13T17:22:13,846 INFO [StoreOpener-704a36f7d1b3ea16a3462969bad1e4d2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 704a36f7d1b3ea16a3462969bad1e4d2 columnFamilyName info
2024-12-13T17:22:13,846 DEBUG [StoreOpener-704a36f7d1b3ea16a3462969bad1e4d2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:22:13,847 INFO [StoreOpener-704a36f7d1b3ea16a3462969bad1e4d2-1 {}] regionserver.HStore(327): Store=704a36f7d1b3ea16a3462969bad1e4d2/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-13T17:22:13,847 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/hbase/namespace/704a36f7d1b3ea16a3462969bad1e4d2
2024-12-13T17:22:13,848 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/hbase/namespace/704a36f7d1b3ea16a3462969bad1e4d2
2024-12-13T17:22:13,850 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 704a36f7d1b3ea16a3462969bad1e4d2
2024-12-13T17:22:13,852 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/hbase/namespace/704a36f7d1b3ea16a3462969bad1e4d2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-13T17:22:13,853 INFO [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 704a36f7d1b3ea16a3462969bad1e4d2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=855248, jitterRate=0.08750505745410919}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-13T17:22:13,853 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 704a36f7d1b3ea16a3462969bad1e4d2:
2024-12-13T17:22:13,854 INFO [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1734110533475.704a36f7d1b3ea16a3462969bad1e4d2., pid=6, masterSystemTime=1734110533834
2024-12-13T17:22:13,855 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1734110533475.704a36f7d1b3ea16a3462969bad1e4d2.
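A detail worth calling out in the "Opened 704a36f7d1b3ea16a3462969bad1e4d2" entry: desiredMaxFileSize=855248 is not the configured value. The TableDescriptorChecker warning further down shows "hbase.hregion.max.filesize" as 786432, and adding the logged jitterRate's share of that base lands exactly on 855248, so the split policy appears to jitter the configured limit. A sketch of that arithmetic, under that assumption; both inputs come straight from this log:

    // Assumed derivation of the logged desiredMaxFileSize.
    public class JitterCheck {
      public static void main(String[] args) {
        long configured = 786432L;               // "hbase.hregion.max.filesize" (warned about below)
        double jitterRate = 0.08750505745410919; // logged by the split policy above
        long desired = configured + (long) (configured * jitterRate);
        System.out.println(desired);             // prints 855248, matching the log line above
      }
    }

The same relation reproduces desiredMaxFileSize=878430 for the TestLogRolling-testLogRolling region below, where jitterRate=0.11698202788829803.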
2024-12-13T17:22:13,856 INFO [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1734110533475.704a36f7d1b3ea16a3462969bad1e4d2.
2024-12-13T17:22:13,856 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=704a36f7d1b3ea16a3462969bad1e4d2, regionState=OPEN, openSeqNum=2, regionLocation=bcc0a2319104,35103,1734110532467
2024-12-13T17:22:13,859 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5
2024-12-13T17:22:13,859 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 704a36f7d1b3ea16a3462969bad1e4d2, server=bcc0a2319104,35103,1734110532467 in 178 msec
2024-12-13T17:22:13,861 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4
2024-12-13T17:22:13,861 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=704a36f7d1b3ea16a3462969bad1e4d2, ASSIGN in 335 msec
2024-12-13T17:22:13,861 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-13T17:22:13,861 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734110533861"}]},"ts":"1734110533861"}
2024-12-13T17:22:13,863 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta
2024-12-13T17:22:13,900 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace
2024-12-13T17:22:13,900 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION
2024-12-13T17:22:13,902 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 424 msec
2024-12-13T17:22:13,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace
2024-12-13T17:22:13,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35103-0x1002069ef590001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:22:13,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:22:13,912 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default
2024-12-13T17:22:13,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace
2024-12-13T17:22:13,935 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 23 msec
2024-12-13T17:22:13,945 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase
2024-12-13T17:22:13,945 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:22:13,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace
2024-12-13T17:22:13,968 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 23 msec
2024-12-13T17:22:13,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default
2024-12-13T17:22:14,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase
2024-12-13T17:22:14,008 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 1.499sec
2024-12-13T17:22:14,008 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-12-13T17:22:14,008 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-12-13T17:22:14,008 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-12-13T17:22:14,008 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-12-13T17:22:14,008 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-12-13T17:22:14,008 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,35795,1734110532328-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-13T17:22:14,008 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,35795,1734110532328-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-12-13T17:22:14,009 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds
2024-12-13T17:22:14,009 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-12-13T17:22:14,009 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,35795,1734110532328-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
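The ZKUtil(113) line above sets a watcher on /hbase/namespace before that znode exists, and the following ZKWatcher(609) lines show the NodeCreated and NodeChildrenChanged events arriving once the namespace procedures create it. That is standard ZooKeeper behavior: exists() registers a watch even on an absent path. A minimal sketch of the pattern; the quorum string and session timeout mirror the logged values, but the class itself is illustrative:

    import org.apache.zookeeper.ZooKeeper;

    // Illustrative watch-before-create pattern, matching the log above.
    public class WatchBeforeCreate {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:51612", 90000,
            event -> System.out.println("Received ZooKeeper Event, type="
                + event.getType() + ", path=" + event.getPath()));
        // exists() sets a watch even though /hbase/namespace does not exist yet,
        // which is what ZKUtil(113) reports; NodeCreated fires when it appears.
        zk.exists("/hbase/namespace", true); // true = reuse the default watcher above
        Thread.sleep(10000); // keep the session alive long enough to observe events
        zk.close();
      }
    }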
2024-12-13T17:22:14,019 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-13T17:22:14,090 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3ed470fc to 127.0.0.1:51612 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@12ab8c0f
2024-12-13T17:22:14,100 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d633db6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-13T17:22:14,102 DEBUG [hconnection-0x1ab7700c-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-13T17:22:14,103 INFO [RS-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37252, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-13T17:22:14,105 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=bcc0a2319104,35795,1734110532328
2024-12-13T17:22:14,105 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-13T17:22:14,107 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false
2024-12-13T17:22:14,108 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false
2024-12-13T17:22:14,110 INFO [RS-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38178, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-12-13T17:22:14,112 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35795 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions.
2024-12-13T17:22:14,112 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35795 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing.
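Both TableDescriptorChecker warnings are expected here: the test evidently creates its table with MAX_FILESIZE=786432 and MEMSTORE_FLUSHSIZE=8192, far below production defaults, to force frequent splits and flushes. A sketch of how such a descriptor would be built with the 2.x client builder API; the table and family names come from the log, and the exact builder chain should be read as illustrative:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    // Sketch of a descriptor carrying the two values the checker warns about.
    public class SmallTableDescriptor {
      public static void main(String[] args) {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setMaxFileSize(786432L)     // MAX_FILESIZE: triggers the over-splitting warning
            .setMemStoreFlushSize(8192L) // MEMSTORE_FLUSHSIZE: triggers the frequent-flush warning
            .build();
        System.out.println(desc);
      }
    }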
2024-12-13T17:22:14,112 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35795 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-13T17:22:14,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35795 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testLogRolling
2024-12-13T17:22:14,114 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION
2024-12-13T17:22:14,114 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:22:14,114 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35795 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 9
2024-12-13T17:22:14,115 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-13T17:22:14,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35795 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9
2024-12-13T17:22:14,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741837_1013 (size=381)
2024-12-13T17:22:14,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741837_1013 (size=381)
2024-12-13T17:22:14,123 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => db80f3d14c7da4090fc6ab00d964b5ae, NAME => 'TestLogRolling-testLogRolling,,1734110534112.db80f3d14c7da4090fc6ab00d964b5ae.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426
2024-12-13T17:22:14,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741838_1014 (size=64)
2024-12-13T17:22:14,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741838_1014 (size=64)
2024-12-13T17:22:14,131 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRolling,,1734110534112.db80f3d14c7da4090fc6ab00d964b5ae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:22:14,131 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1681): Closing db80f3d14c7da4090fc6ab00d964b5ae, disabling compactions & flushes
2024-12-13T17:22:14,131 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRolling,,1734110534112.db80f3d14c7da4090fc6ab00d964b5ae.
2024-12-13T17:22:14,131 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRolling,,1734110534112.db80f3d14c7da4090fc6ab00d964b5ae.
2024-12-13T17:22:14,131 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRolling,,1734110534112.db80f3d14c7da4090fc6ab00d964b5ae. after waiting 0 ms
2024-12-13T17:22:14,131 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRolling,,1734110534112.db80f3d14c7da4090fc6ab00d964b5ae.
2024-12-13T17:22:14,131 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRolling,,1734110534112.db80f3d14c7da4090fc6ab00d964b5ae.
2024-12-13T17:22:14,131 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1635): Region close journal for db80f3d14c7da4090fc6ab00d964b5ae:
2024-12-13T17:22:14,132 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META
2024-12-13T17:22:14,133 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1734110534112.db80f3d14c7da4090fc6ab00d964b5ae.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1734110534132"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734110534132"}]},"ts":"1734110534132"}
2024-12-13T17:22:14,134 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta.
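The MetaTableAccessor(2113) lines print the hbase:meta mutations as JSON: a Put on the region row carrying info:regioninfo and info:state cells that share a timestamp, with only the value lengths (vlen) logged. A rough client-side equivalent follows; the cell values are placeholders because the log records vlen=63 and vlen=6, not the bytes themselves:

    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch of the mutation the JSON line above describes, values elided.
    public class MetaPutSketch {
      public static void main(String[] args) {
        byte[] row = Bytes.toBytes(
            "TestLogRolling-testLogRolling,,1734110534112.db80f3d14c7da4090fc6ab00d964b5ae.");
        long ts = 1734110534132L; // shared "timestamp" from the logged JSON
        Put put = new Put(row)
            .addColumn(Bytes.toBytes("info"), Bytes.toBytes("regioninfo"), ts, new byte[63])
            .addColumn(Bytes.toBytes("info"), Bytes.toBytes("state"), ts, new byte[6]);
        System.out.println(put);
      }
    }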
2024-12-13T17:22:14,135 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-13T17:22:14,135 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734110534135"}]},"ts":"1734110534135"}
2024-12-13T17:22:14,136 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta
2024-12-13T17:22:14,149 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=db80f3d14c7da4090fc6ab00d964b5ae, ASSIGN}]
2024-12-13T17:22:14,150 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=db80f3d14c7da4090fc6ab00d964b5ae, ASSIGN
2024-12-13T17:22:14,151 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=db80f3d14c7da4090fc6ab00d964b5ae, ASSIGN; state=OFFLINE, location=bcc0a2319104,35103,1734110532467; forceNewPlan=false, retain=false
2024-12-13T17:22:14,301 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=db80f3d14c7da4090fc6ab00d964b5ae, regionState=OPENING, regionLocation=bcc0a2319104,35103,1734110532467
2024-12-13T17:22:14,304 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure db80f3d14c7da4090fc6ab00d964b5ae, server=bcc0a2319104,35103,1734110532467}]
2024-12-13T17:22:14,457 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bcc0a2319104,35103,1734110532467
2024-12-13T17:22:14,466 INFO [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRolling,,1734110534112.db80f3d14c7da4090fc6ab00d964b5ae.
2024-12-13T17:22:14,467 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => db80f3d14c7da4090fc6ab00d964b5ae, NAME => 'TestLogRolling-testLogRolling,,1734110534112.db80f3d14c7da4090fc6ab00d964b5ae.', STARTKEY => '', ENDKEY => ''}
2024-12-13T17:22:14,467 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling db80f3d14c7da4090fc6ab00d964b5ae
2024-12-13T17:22:14,467 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRolling,,1734110534112.db80f3d14c7da4090fc6ab00d964b5ae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:22:14,468 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for db80f3d14c7da4090fc6ab00d964b5ae
2024-12-13T17:22:14,468 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for db80f3d14c7da4090fc6ab00d964b5ae
2024-12-13T17:22:14,471 INFO [StoreOpener-db80f3d14c7da4090fc6ab00d964b5ae-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region db80f3d14c7da4090fc6ab00d964b5ae
2024-12-13T17:22:14,473 INFO [StoreOpener-db80f3d14c7da4090fc6ab00d964b5ae-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region db80f3d14c7da4090fc6ab00d964b5ae columnFamilyName info
2024-12-13T17:22:14,473 DEBUG [StoreOpener-db80f3d14c7da4090fc6ab00d964b5ae-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:22:14,474 INFO [StoreOpener-db80f3d14c7da4090fc6ab00d964b5ae-1 {}] regionserver.HStore(327): Store=db80f3d14c7da4090fc6ab00d964b5ae/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-13T17:22:14,475 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae
2024-12-13T17:22:14,475 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae
2024-12-13T17:22:14,477 DEBUG [master/bcc0a2319104:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=1, created chunk count=11, reused chunk count=38, reuseRatio=77.55%
2024-12-13T17:22:14,478 DEBUG [master/bcc0a2319104:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0
2024-12-13T17:22:14,479 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for db80f3d14c7da4090fc6ab00d964b5ae
2024-12-13T17:22:14,482 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-13T17:22:14,483 INFO [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened db80f3d14c7da4090fc6ab00d964b5ae; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=878430, jitterRate=0.11698202788829803}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-13T17:22:14,483 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for db80f3d14c7da4090fc6ab00d964b5ae:
2024-12-13T17:22:14,484 INFO [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRolling,,1734110534112.db80f3d14c7da4090fc6ab00d964b5ae., pid=11, masterSystemTime=1734110534457
2024-12-13T17:22:14,486 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRolling,,1734110534112.db80f3d14c7da4090fc6ab00d964b5ae.
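The WALSplitUtil(409) and "Opened db80f3d14c7da4090fc6ab00d964b5ae" entries above relate arithmetically: the 1.seqid marker persists newMaxSeqId=1, and the region opens announcing next sequenceid=2, so the next sequence id appears to be simply the persisted maximum plus one. As a tiny worked check, with the numbers taken from the log:

    // Assumed relationship, consistent with the logged values.
    public class NextSeqId {
      public static void main(String[] args) {
        long newMaxSeqId = 1L;               // persisted in the 1.seqid marker file
        System.out.println(newMaxSeqId + 1); // 2, the logged "next sequenceid"
      }
    }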
2024-12-13T17:22:14,486 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=db80f3d14c7da4090fc6ab00d964b5ae, regionState=OPEN, openSeqNum=2, regionLocation=bcc0a2319104,35103,1734110532467 2024-12-13T17:22:14,489 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-13T17:22:14,489 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure db80f3d14c7da4090fc6ab00d964b5ae, server=bcc0a2319104,35103,1734110532467 in 184 msec 2024-12-13T17:22:14,490 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-13T17:22:14,490 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=db80f3d14c7da4090fc6ab00d964b5ae, ASSIGN in 340 msec 2024-12-13T17:22:14,490 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-13T17:22:14,491 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734110534491"}]},"ts":"1734110534491"} 2024-12-13T17:22:14,492 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-12-13T17:22:14,534 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-13T17:22:14,536 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testLogRolling in 422 msec 2024-12-13T17:22:14,734 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:14,773 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:14,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:15,300 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:15,300 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:15,300 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:15,301 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:15,301 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:15,301 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:15,317 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:15,318 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:15,318 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:15,318 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:15,318 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:15,318 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:15,320 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:15,320 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:15,320 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:15,322 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:15,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:22:15,774 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:15,826 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-13T17:22:15,827 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:15,827 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:15,827 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:15,828 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:15,828 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:15,828 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
2024-12-13T17:22:15,851 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:15,852 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:15,852 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:15,852 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:15,853 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:15,853 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:15,858 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:15,858 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:15,859 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:15,861 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:15,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
2024-12-13T17:22:16,736 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:22:16,774 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:16,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:17,736 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:17,775 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:17,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:22:18,737 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:18,776 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:18,837 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-13T17:22:18,839 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-13T17:22:18,840 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-12-13T17:22:18,948 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:22:19,738 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:19,777 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:19,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:20,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:20,777 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:20,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:21,678 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-13T17:22:21,679 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:21,679 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:21,679 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:21,679 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:21,679 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:21,679 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:21,693 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:21,693 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:21,693 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:21,694 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:21,694 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:21,694 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:21,697 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:21,697 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:21,697 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:21,699 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-13T17:22:21,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:21,778 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:22:21,950 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:22,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:22,779 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:22,951 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:23,741 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:23,780 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:23,952 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:22:24,019 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling
2024-12-13T17:22:24,019 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer
2024-12-13T17:22:24,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35795 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9
2024-12-13T17:22:24,118 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling, procId: 9 completed
2024-12-13T17:22:24,125 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testLogRolling
2024-12-13T17:22:24,125 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testLogRolling,,1734110534112.db80f3d14c7da4090fc6ab00d964b5ae.
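The "procId: 9 completed" and "Found 1 regions" entries above come from the test utility polling the Admin API after table creation. A minimal sketch of the same check from a plain client follows; only the table name is taken from the log, and the class name and structure are illustrative assumptions, not the test's actual code.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

// Hypothetical verification client mirroring the "Found 1 regions ...
// firstRegionName=" check logged above.
public final class VerifyTableRegions {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // isTableAvailable reports whether every region of the table is online.
      if (admin.tableExists(table) && admin.isTableAvailable(table)) {
        List<RegionInfo> regions = admin.getRegions(table);
        System.out.println("Found " + regions.size() + " regions for table " + table);
        System.out.println("firstRegionName=" + regions.get(0).getRegionNameAsString());
      }
    }
  }
}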
2024-12-13T17:22:24,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35103 {}] regionserver.HRegion(8581): Flush requested on db80f3d14c7da4090fc6ab00d964b5ae
2024-12-13T17:22:24,137 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing db80f3d14c7da4090fc6ab00d964b5ae 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-13T17:22:24,156 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/.tmp/info/6089e821cb6c402a8b10d4f38a026bf4 is 1080, key is row0001/info:/1734110544128/Put/seqid=0
2024-12-13T17:22:24,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741839_1015 (size=12509)
2024-12-13T17:22:24,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741839_1015 (size=12509)
2024-12-13T17:22:24,162 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/.tmp/info/6089e821cb6c402a8b10d4f38a026bf4
2024-12-13T17:22:24,165 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35103 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=db80f3d14c7da4090fc6ab00d964b5ae, server=bcc0a2319104,35103,1734110532467
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-13T17:22:24,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35103 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:37252 deadline: 1734110554165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=db80f3d14c7da4090fc6ab00d964b5ae, server=bcc0a2319104,35103,1734110532467
2024-12-13T17:22:24,167 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/.tmp/info/6089e821cb6c402a8b10d4f38a026bf4 as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/6089e821cb6c402a8b10d4f38a026bf4
2024-12-13T17:22:24,172 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/6089e821cb6c402a8b10d4f38a026bf4, entries=7, sequenceid=11, filesize=12.2 K
2024-12-13T17:22:24,173 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for db80f3d14c7da4090fc6ab00d964b5ae in 36ms, sequenceid=11, compaction requested=false
2024-12-13T17:22:24,173 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for db80f3d14c7da4090fc6ab00d964b5ae:
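The RegionTooBusyException above is HBase's retriable back-pressure signal: the put arrived while the region's memstore was over its blocking limit (32.0 K in this test configuration) and the flush was still in flight, so the mutate was rejected with a deadline. The standard HBase client retries this exception automatically; a hand-rolled writer would need something like the sketch below, where the helper name, retry count, and backoff values are illustrative assumptions, not part of the test.

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

// Hypothetical helper: retry a put with exponential backoff while the region
// flushes, mirroring how the default client treats RegionTooBusyException.
final class BackoffPut {
  static void putWithBackoff(Connection conn, TableName tableName, Put put)
      throws IOException, InterruptedException {
    long sleepMs = 100; // illustrative starting backoff
    try (Table table = conn.getTable(tableName)) {
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          return; // write accepted
        } catch (RegionTooBusyException busy) {
          // Memstore over its blocking limit; give the in-flight flush time to finish.
          Thread.sleep(sleepMs);
          sleepMs = Math.min(sleepMs * 2, 5_000);
        }
      }
      throw new IOException("region still too busy after retries");
    }
  }
}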
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:24,780 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:24,953 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:25,742 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:25,781 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:25,954 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:26,743 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:22:26,782 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:26,955 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:27,744 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:27,783 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:27,956 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:28,745 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:28,784 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:28,957 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:22:29,746 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:29,785 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:29,959 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:30,748 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:30,786 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:30,960 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:31,749 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
[The identical WARN from util.RecoverLeaseFSUtils(258), with the same InvocationTargetException caused by java.io.IOException: Filesystem closed, repeats roughly once per second from 2024-12-13T17:22:31,749 through 2024-12-13T17:22:33,963, cycling over the MasterData WAL above and the two region-server WALs hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta and hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078.]
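Note: every one of these traces bottoms out in java.io.IOException: Filesystem closed from DFSClient.checkOpen, i.e. the Close-WAL-Writer-0 thread is still retrying lease recovery against a FileSystem whose underlying DFSClient has already been shut down; in mini-cluster tests this typically happens when shutdown closes the process-wide cached FileSystem while a background writer-close task is still running. A minimal standalone sketch of that failure mode, not HBase code; the path is hypothetical and fs.defaultFS is assumed to point at an HDFS namenode:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class ClosedFsDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);  // cached instance, shared process-wide
    fs.close();                            // what a cluster shutdown effectively does
    try {
      // Any later call through the closed client fails with "Filesystem closed".
      ((DistributedFileSystem) fs).isFileClosed(new Path("/wals/example.wal"));
    } catch (IOException e) {
      System.out.println(e.getMessage());  // prints "Filesystem closed"
    }
  }
}

Because the client can never be reopened, the retry loop producing the entries above cannot succeed; it can only time out.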
2024-12-13T17:22:34,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35103 {}] regionserver.HRegion(8581): Flush requested on db80f3d14c7da4090fc6ab00d964b5ae
2024-12-13T17:22:34,230 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing db80f3d14c7da4090fc6ab00d964b5ae 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB
2024-12-13T17:22:34,236 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/.tmp/info/cf1b106a3d0946338baa65332f048935 is 1080, key is row0008/info:/1734110544138/Put/seqid=0
2024-12-13T17:22:34,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741840_1016 (size=29761)
2024-12-13T17:22:34,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741840_1016 (size=29761)
2024-12-13T17:22:34,244 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/.tmp/info/cf1b106a3d0946338baa65332f048935
2024-12-13T17:22:34,251 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/.tmp/info/cf1b106a3d0946338baa65332f048935 as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/cf1b106a3d0946338baa65332f048935
2024-12-13T17:22:34,257 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/cf1b106a3d0946338baa65332f048935, entries=23, sequenceid=37, filesize=29.1 K
2024-12-13T17:22:34,258 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for db80f3d14c7da4090fc6ab00d964b5ae in 29ms, sequenceid=37, compaction requested=false
2024-12-13T17:22:34,258 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for db80f3d14c7da4090fc6ab00d964b5ae:
2024-12-13T17:22:34,258 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=41.3 K, sizeToCheck=16.0 K
2024-12-13T17:22:34,258 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-13T17:22:34,258 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/cf1b106a3d0946338baa65332f048935 because midkey is the same as first or last row
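Note: "cannot split ... because midkey is the same as first or last row" is the splittability guard tripping. The size check passes ("Should split because info size=41.3 K, sizeToCheck=16.0 K"), but with only a narrow run of adjacent rows in one small file the file's mid key appears to coincide with a boundary key, so no usable split point exists yet. A hedged, self-contained illustration of that guard; this is not the actual StoreUtils code and the keys are taken loosely from this log:

import java.util.Arrays;

final class SplitPointCheck {
  // A mid key only yields a usable split point when it differs from both
  // boundary keys; splitting at a boundary would leave one daughter empty.
  static boolean isSplittable(byte[] firstKey, byte[] midKey, byte[] lastKey) {
    return !Arrays.equals(midKey, firstKey) && !Arrays.equals(midKey, lastKey);
  }

  public static void main(String[] args) {
    byte[] first = "row0001".getBytes();
    byte[] mid = "row0001".getBytes();   // data still clustered at the start
    byte[] last = "row0031".getBytes();
    System.out.println(isSplittable(first, mid, last)); // false -> "cannot split"
  }
}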
[The WARN util.RecoverLeaseFSUtils(258) "Failed invocation ... Filesystem closed" entry repeats for the same three WAL files at 2024-12-13T17:22:34,753, 17:22:34,790, 17:22:34,964, 17:22:35,754, 17:22:35,791 and 17:22:35,965, each with the stack trace shown above.]
2024-12-13T17:22:36,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35103 {}] regionserver.HRegion(8581): Flush requested on db80f3d14c7da4090fc6ab00d964b5ae
2024-12-13T17:22:36,247 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing db80f3d14c7da4090fc6ab00d964b5ae 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-13T17:22:36,255 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/.tmp/info/a7d93810b73945b29f0b8c2b15a56920 is 1080, key is row0031/info:/1734110554231/Put/seqid=0
2024-12-13T17:22:36,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35103 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=db80f3d14c7da4090fc6ab00d964b5ae, server=bcc0a2319104,35103,1734110532467
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-13T17:22:36,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35103 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:37252 deadline: 1734110566280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=db80f3d14c7da4090fc6ab00d964b5ae, server=bcc0a2319104,35103,1734110532467
2024-12-13T17:22:36,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741841_1017 (size=12509)
2024-12-13T17:22:36,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741841_1017 (size=12509)
2024-12-13T17:22:36,285 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/.tmp/info/a7d93810b73945b29f0b8c2b15a56920
2024-12-13T17:22:36,290 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/.tmp/info/a7d93810b73945b29f0b8c2b15a56920 as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/a7d93810b73945b29f0b8c2b15a56920
2024-12-13T17:22:36,295 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/a7d93810b73945b29f0b8c2b15a56920, entries=7, sequenceid=47, filesize=12.2 K
2024-12-13T17:22:36,296 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for db80f3d14c7da4090fc6ab00d964b5ae in 49ms, sequenceid=47, compaction requested=true
2024-12-13T17:22:36,296 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for db80f3d14c7da4090fc6ab00d964b5ae:
2024-12-13T17:22:36,296 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=53.5 K, sizeToCheck=16.0 K
2024-12-13T17:22:36,296 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-13T17:22:36,296 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/cf1b106a3d0946338baa65332f048935 because midkey is the same as first or last row
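Note: the RegionTooBusyException above is write backpressure from HRegion.checkResources: once the region's memstore exceeds its blocking limit (here 32.0 K, evidently a deliberately tiny test configuration), puts are rejected until the in-flight flush drains, and the ipc.CallRunner DEBUG line shows the exception being handed back to the client, which is expected to back off and retry. A rough sketch of the shape of that check, with made-up sizes and hypothetical names; the real logic lives in HRegion.checkResources:

import java.io.IOException;

final class MemStoreGate {
  private final long blockingLimitBytes;

  // The blocking limit is the flush size times a blocking multiplier
  // (hbase.hregion.memstore.block.multiplier, which defaults to 4).
  MemStoreGate(long flushSizeBytes, long blockingMultiplier) {
    this.blockingLimitBytes = flushSizeBytes * blockingMultiplier;
  }

  void checkResources(long memStoreSizeBytes, String regionName) throws IOException {
    if (memStoreSizeBytes > blockingLimitBytes) {
      // Mirrors the log message: "Over memstore limit=32.0 K, regionName=..."
      throw new IOException("Over memstore limit=" + blockingLimitBytes
          + " bytes, regionName=" + regionName);
    }
  }

  public static void main(String[] args) throws IOException {
    MemStoreGate gate = new MemStoreGate(8 * 1024, 4);   // 8 K flush size -> 32 K limit
    gate.checkResources(33 * 1024, "db80f3d14c7da4090fc6ab00d964b5ae"); // throws
  }
}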
2024-12-13T17:22:36,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db80f3d14c7da4090fc6ab00d964b5ae:info, priority=-2147483648, current under compaction store size is 1
2024-12-13T17:22:36,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-13T17:22:36,296 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-13T17:22:36,297 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-13T17:22:36,298 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HStore(1540): db80f3d14c7da4090fc6ab00d964b5ae/info is initiating minor compaction (all files)
2024-12-13T17:22:36,298 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db80f3d14c7da4090fc6ab00d964b5ae/info in TestLogRolling-testLogRolling,,1734110534112.db80f3d14c7da4090fc6ab00d964b5ae.
2024-12-13T17:22:36,298 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/6089e821cb6c402a8b10d4f38a026bf4, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/cf1b106a3d0946338baa65332f048935, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/a7d93810b73945b29f0b8c2b15a56920] into tmpdir=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/.tmp, totalSize=53.5 K
2024-12-13T17:22:36,298 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6089e821cb6c402a8b10d4f38a026bf4, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1734110544128
2024-12-13T17:22:36,298 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf1b106a3d0946338baa65332f048935, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1734110544138
2024-12-13T17:22:36,299 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.Compactor(224): Compacting a7d93810b73945b29f0b8c2b15a56920, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1734110554231
2024-12-13T17:22:36,313 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): db80f3d14c7da4090fc6ab00d964b5ae#info#compaction#44 average throughput is 18.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
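Note: the ExploringCompactionPolicy line is internally consistent with the three flushed files above: 12509 + 29761 + 12509 = 54779 bytes, exactly the "3 files of size 54779" it selected. The core eligibility test in ratio-based selection is that no candidate file may be larger than the compaction ratio (hbase.hstore.compaction.ratio, 1.2 by default) times the combined size of the others. A hedged standalone check of that condition, using the actual sizes from this log; this is an illustration of the idea, not the policy's full search:

import java.util.List;

final class RatioCheck {
  // True when every file is <= ratio * (total size of the other files),
  // the "in ratio" condition the policy logs above.
  static boolean inRatio(List<Long> sizes, double ratio) {
    long total = sizes.stream().mapToLong(Long::longValue).sum();
    for (long size : sizes) {
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // The three store files from this log: 12.2 K, 29.1 K and 12.2 K.
    System.out.println(inRatio(List.of(12509L, 29761L, 12509L), 1.2)); // true
  }
}

Here the largest file (29761 bytes) is under 1.2 x 25018 = 30021.6, so the whole set qualifies, matching "1 permutations with 1 in ratio".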
2024-12-13T17:22:36,313 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/.tmp/info/0e39c8c6d4ab46f9857dc85754e1a2e3 is 1080, key is row0001/info:/1734110544128/Put/seqid=0
2024-12-13T17:22:36,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741842_1018 (size=44978)
2024-12-13T17:22:36,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741842_1018 (size=44978)
2024-12-13T17:22:36,323 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/.tmp/info/0e39c8c6d4ab46f9857dc85754e1a2e3 as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/0e39c8c6d4ab46f9857dc85754e1a2e3
2024-12-13T17:22:36,329 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in db80f3d14c7da4090fc6ab00d964b5ae/info of db80f3d14c7da4090fc6ab00d964b5ae into 0e39c8c6d4ab46f9857dc85754e1a2e3(size=43.9 K), total size for store is 43.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-13T17:22:36,329 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db80f3d14c7da4090fc6ab00d964b5ae:
2024-12-13T17:22:36,329 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1734110534112.db80f3d14c7da4090fc6ab00d964b5ae., storeName=db80f3d14c7da4090fc6ab00d964b5ae/info, priority=13, startTime=1734110556296; duration=0sec
2024-12-13T17:22:36,329 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=43.9 K, sizeToCheck=16.0 K
2024-12-13T17:22:36,329 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-13T17:22:36,329 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/0e39c8c6d4ab46f9857dc85754e1a2e3 because midkey is the same as first or last row
2024-12-13T17:22:36,329 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-13T17:22:36,329 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db80f3d14c7da4090fc6ab00d964b5ae:info
[The WARN util.RecoverLeaseFSUtils(258) "Failed invocation ... Filesystem closed" entry recurs at 2024-12-13T17:22:36,755 for the meta WAL and at 2024-12-13T17:22:36,791 for bcc0a2319104%2C43669%2C1734110347446.1734110348078, each with the same stack trace as above.]
2024-12-13T17:22:36,792 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=3 on file=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 after 196178ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor198.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
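Note: this entry differs from the repeated ones: RecoverLeaseFSUtils(233) reports attempt=3 after 196178ms, exposing the utility's outer retry loop around DistributedFileSystem.recoverLease, which against the closed client can only fail with the same "Filesystem closed". A hedged sketch of a loop of that general shape; the real RecoverLeaseFSUtils also polls isFileClosed between attempts and uses configurable timeouts, so treat names and timings here as illustrative:

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

final class LeaseRecoverySketch {
  // Retry lease recovery until it succeeds or a deadline passes, logging the
  // attempt count and elapsed time the way the WARN above does.
  static boolean recover(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws InterruptedException {
    long start = System.currentTimeMillis();
    for (int attempt = 0; ; attempt++) {
      try {
        if (dfs.recoverLease(wal)) {
          return true; // lease recovered; the file is now closed
        }
      } catch (IOException e) {
        System.out.println("attempt=" + attempt + " on file=" + wal + " after "
            + (System.currentTimeMillis() - start) + "ms: " + e.getMessage());
      }
      if (System.currentTimeMillis() - start > timeoutMs) {
        return false; // give up; the caller decides what to do with the WAL
      }
      Thread.sleep(1000); // matches the ~1 s cadence of the entries above
    }
  }
}

With the client closed, every call throws, so the attempt counter and elapsed time simply grow, which is exactly the attempt=3 / 196178ms pattern logged here.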
[The WARN util.RecoverLeaseFSUtils(258) "Failed invocation ... Filesystem closed" entries continue at 2024-12-13T17:22:36,966, 17:22:37,756, 17:22:37,793, 17:22:37,967, 17:22:38,757, 17:22:38,794 and 17:22:38,969, still cycling over the same three WAL files with the identical stack trace; the excerpt ends inside the 17:22:38,969 trace.]
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:39,758 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:39,795 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:39,970 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:40,759 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:40,796 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:40,971 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:22:41,760 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:41,797 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
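The repeating WARN above comes from RecoverLeaseFSUtils invoking DistributedFileSystem.isFileClosed through reflection (visible in the GeneratedMethodAccessor / Method.invoke frames), so the real failure, the java.io.IOException "Filesystem closed" thrown by DFSClient.checkOpen, surfaces wrapped in an InvocationTargetException. A minimal Java sketch of that call pattern follows; it is illustrative only (the class name IsFileClosedProbe and the per-call Method lookup are assumptions, not the HBase source):

    // Illustrative sketch (assumed names; not the HBase source): the reflective
    // isFileClosed probe whose failure mode appears in the WARN entries above.
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class IsFileClosedProbe {
        // isFileClosed(Path) is looked up reflectively because it exists on
        // DistributedFileSystem but not on the generic FileSystem API.
        static boolean isFileClosed(FileSystem fs, Path wal) {
            try {
                Method m = fs.getClass().getMethod("isFileClosed", Path.class);
                return (Boolean) m.invoke(fs, wal);
            } catch (InvocationTargetException e) {
                // The underlying IOException ("Filesystem closed" from
                // DFSClient.checkOpen) arrives as the wrapped cause; the caller
                // logs "Failed invocation for <file>" and retries.
                return false;
            } catch (ReflectiveOperationException e) {
                return false; // method absent on this FileSystem implementation
            }
        }
    }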
2024-12-13T17:22:41,972 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 (InvocationTargetException caused by java.io.IOException: Filesystem closed; stack trace identical to the 17:22:37,756 entry above)
2024-12-13T17:22:42,311 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; this may be because your Hadoop version is > 3.2.3 or 3.3.4. See HBASE-27595 for details.
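The DEBUG line above records HBaseTestingUtility's FsDatasetAsyncDiskServiceFixer failing to find a private threadGroup field, which HBASE-27595 notes is absent in newer Hadoop releases. A sketch of that reflective field probe with its graceful fallback; the helper name ThreadGroupProbe is hypothetical, only the "threadGroup" field name comes from the log:

    // Sketch of the reflective field probe behind the DEBUG line above.
    import java.lang.reflect.Field;

    final class ThreadGroupProbe {
        static ThreadGroup threadGroupOf(Object asyncDiskService) {
            try {
                Field f = asyncDiskService.getClass().getDeclaredField("threadGroup");
                f.setAccessible(true);
                return (ThreadGroup) f.get(asyncDiskService);
            } catch (NoSuchFieldException e) {
                // Hadoop versions > 3.2.3 / 3.3.4 dropped the field (HBASE-27595);
                // the fixer logs this at DEBUG and skips its workaround.
                return null;
            } catch (IllegalAccessException e) {
                return null;
            }
        }
    }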
2024-12-13T17:22:42,761 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta (InvocationTargetException caused by java.io.IOException: Filesystem closed; stack trace identical to the 17:22:37,756 entry above)
2024-12-13T17:22:42,798 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 (InvocationTargetException caused by java.io.IOException: Filesystem closed; stack trace identical to the 17:22:37,756 entry above)
2024-12-13T17:22:42,974 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 (InvocationTargetException caused by java.io.IOException: Filesystem closed; stack trace identical to the 17:22:37,756 entry above)
2024-12-13T17:22:43,761 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta (InvocationTargetException caused by java.io.IOException: Filesystem closed; stack trace identical to the 17:22:37,756 entry above)
2024-12-13T17:22:43,799 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 (InvocationTargetException caused by java.io.IOException: Filesystem closed; stack trace identical to the 17:22:37,756 entry above)
2024-12-13T17:22:43,817 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-13T17:22:43,821 INFO [RS-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43186, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService
2024-12-13T17:22:43,975 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 (InvocationTargetException caused by java.io.IOException: Filesystem closed; stack trace identical to the 17:22:37,756 entry above)
2024-12-13T17:22:44,763 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta (InvocationTargetException caused by java.io.IOException: Filesystem closed; stack trace identical to the 17:22:37,756 entry above)
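The ipc.RpcConnection and ServerRpcConnection lines at 17:22:43,817 and 17:22:43,821 above show the RPC handshake negotiating SIMPLE authentication with sasl=false. In HBase this mode is selected through the standard hbase.security.authentication setting; a minimal sketch of reading it (the key and HBaseConfiguration.create() are standard API, the rest is illustrative, and this mini-cluster presumably runs with the "simple" default):

    // Minimal sketch: the configuration key behind the SIMPLE / sasl=false
    // handshake logged above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    final class AuthModeCheck {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // "simple" (the default) means no SASL; "kerberos" enables it.
            String auth = conf.get("hbase.security.authentication", "simple");
            System.out.println("authentication=" + auth
                + ", sasl=" + !"simple".equals(auth));
        }
    }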
2024-12-13T17:22:44,800 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 (InvocationTargetException caused by java.io.IOException: Filesystem closed; stack trace identical to the 17:22:37,756 entry above)
2024-12-13T17:22:44,976 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 (InvocationTargetException caused by java.io.IOException: Filesystem closed; stack trace identical to the 17:22:37,756 entry above)
2024-12-13T17:22:45,764 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta (InvocationTargetException caused by java.io.IOException: Filesystem closed; stack trace identical to the 17:22:37,756 entry above)
2024-12-13T17:22:45,801 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 (InvocationTargetException caused by java.io.IOException: Filesystem closed; stack trace identical to the 17:22:37,756 entry above)
2024-12-13T17:22:45,977 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 (InvocationTargetException caused by java.io.IOException: Filesystem closed; stack trace identical to the 17:22:37,756 entry above)
2024-12-13T17:22:46,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35103 {}] regionserver.HRegion(8581): Flush requested on db80f3d14c7da4090fc6ab00d964b5ae
2024-12-13T17:22:46,329 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing db80f3d14c7da4090fc6ab00d964b5ae 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB
2024-12-13T17:22:46,335 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/.tmp/info/d5e346a78732490099a7b3422ea2703d is 1080, key is row0038/info:/1734110556248/Put/seqid=0
2024-12-13T17:22:46,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741843_1019 (size=29761)
2024-12-13T17:22:46,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741843_1019 (size=29761)
2024-12-13T17:22:46,340 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/.tmp/info/d5e346a78732490099a7b3422ea2703d
2024-12-13T17:22:46,347 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/.tmp/info/d5e346a78732490099a7b3422ea2703d as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/d5e346a78732490099a7b3422ea2703d
2024-12-13T17:22:46,354 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/d5e346a78732490099a7b3422ea2703d, entries=23, sequenceid=74, filesize=29.1 K
2024-12-13T17:22:46,355 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=4.20 KB/4304 for db80f3d14c7da4090fc6ab00d964b5ae in 27ms, sequenceid=74, compaction requested=false
2024-12-13T17:22:46,355 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for db80f3d14c7da4090fc6ab00d964b5ae:
2024-12-13T17:22:46,355 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=73.0 K, sizeToCheck=16.0 K
2024-12-13T17:22:46,356 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-13T17:22:46,356 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/0e39c8c6d4ab46f9857dc85754e1a2e3 because midkey is the same as first or last row
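The last three DEBUG lines above record the split decision that follows the flush: the info store (73.0 K) exceeds sizeToCheck (16.0 K), so a split is desirable, but StoreUtils vetoes it because the candidate midkey equals the first or last row, which would leave one daughter region empty. A simplified sketch of that two-step check; the numbers come from the log, while the method shapes are illustrative rather than the actual RegionSplitPolicy / StoreUtils API:

    // Simplified sketch of the split decision recorded in the DEBUG lines above.
    import java.util.Arrays;

    final class SplitDecision {
        static boolean shouldSplit(long storeSizeBytes, long sizeToCheckBytes) {
            return storeSizeBytes > sizeToCheckBytes; // 73.0 K > 16.0 K, so true
        }

        static boolean canSplitAt(byte[] midkey, byte[] firstRow, byte[] lastRow) {
            // A split point equal to the first or last row would produce an
            // empty daughter region, hence the "cannot split" DEBUG above.
            return !(Arrays.equals(midkey, firstRow) || Arrays.equals(midkey, lastRow));
        }
    }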
hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:46,802 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:46,978 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:47,766 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:47,803 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:47,979 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
2024-12-13T17:22:48,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35103 {}] regionserver.HRegion(8581): Flush requested on db80f3d14c7da4090fc6ab00d964b5ae
2024-12-13T17:22:48,347 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing db80f3d14c7da4090fc6ab00d964b5ae 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-13T17:22:48,352 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/.tmp/info/5f3a30d499cd423aba0a9f24063c4738 is 1080, key is row0061/info:/1734110566330/Put/seqid=0
2024-12-13T17:22:48,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741844_1020 (size=12509)
2024-12-13T17:22:48,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741844_1020 (size=12509)
2024-12-13T17:22:48,358 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=84 (bloomFilter=true), to=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/.tmp/info/5f3a30d499cd423aba0a9f24063c4738
2024-12-13T17:22:48,364 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/.tmp/info/5f3a30d499cd423aba0a9f24063c4738 as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/5f3a30d499cd423aba0a9f24063c4738
2024-12-13T17:22:48,369 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/5f3a30d499cd423aba0a9f24063c4738, entries=7, sequenceid=84, filesize=12.2 K
2024-12-13T17:22:48,370 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=17.86 KB/18292 for db80f3d14c7da4090fc6ab00d964b5ae in 23ms, sequenceid=84, compaction requested=true
2024-12-13T17:22:48,370 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for db80f3d14c7da4090fc6ab00d964b5ae:
2024-12-13T17:22:48,370 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=85.2 K, sizeToCheck=16.0 K
2024-12-13T17:22:48,370 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-13T17:22:48,370 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/0e39c8c6d4ab46f9857dc85754e1a2e3 because midkey is the same as first or last row
2024-12-13T17:22:48,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db80f3d14c7da4090fc6ab00d964b5ae:info, priority=-2147483648, current under compaction store size is 1
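[Editor's note] The "Should split", "regionsWithCommonTable", and "cannot split ... midkey" lines above are two distinct checks: the store's total size exceeds the policy threshold, yet no usable split point exists because the largest file's midkey equals its first or last row. A hedged sketch of that logic with illustrative names, not the exact HBase methods:

    import java.util.Arrays;
    import java.util.Optional;

    final class SplitCheckSketch {
      // e.g. above: 85.2 K > 16.0 K, so the store is a split candidate.
      static boolean shouldSplit(long storeSizeBytes, long sizeToCheckBytes) {
        return storeSizeBytes > sizeToCheckBytes;
      }

      // No split point when the midkey coincides with the first or last row,
      // which is exactly the "cannot split ... midkey" message above.
      static Optional<byte[]> splitPoint(byte[] firstKey, byte[] midKey, byte[] lastKey) {
        if (Arrays.equals(midKey, firstKey) || Arrays.equals(midKey, lastKey)) {
          return Optional.empty();
        }
        return Optional.of(midKey);
      }
    }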
2024-12-13T17:22:48,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-13T17:22:48,371 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-13T17:22:48,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35103 {}] regionserver.HRegion(8581): Flush requested on db80f3d14c7da4090fc6ab00d964b5ae
2024-12-13T17:22:48,371 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing db80f3d14c7da4090fc6ab00d964b5ae 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB
2024-12-13T17:22:48,372 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 87248 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-13T17:22:48,372 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HStore(1540): db80f3d14c7da4090fc6ab00d964b5ae/info is initiating minor compaction (all files)
2024-12-13T17:22:48,372 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db80f3d14c7da4090fc6ab00d964b5ae/info in TestLogRolling-testLogRolling,,1734110534112.db80f3d14c7da4090fc6ab00d964b5ae.
2024-12-13T17:22:48,372 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/0e39c8c6d4ab46f9857dc85754e1a2e3, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/d5e346a78732490099a7b3422ea2703d, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/5f3a30d499cd423aba0a9f24063c4738] into tmpdir=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/.tmp, totalSize=85.2 K
2024-12-13T17:22:48,373 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0e39c8c6d4ab46f9857dc85754e1a2e3, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1734110544128
2024-12-13T17:22:48,373 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.Compactor(224): Compacting d5e346a78732490099a7b3422ea2703d, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1734110556248
2024-12-13T17:22:48,374 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5f3a30d499cd423aba0a9f24063c4738, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=84, earliestPutTs=1734110566330
2024-12-13T17:22:48,376 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/.tmp/info/cdd6e29d515942cba2369ccb9c50fe2f is 1080, key is row0068/info:/1734110568348/Put/seqid=0
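[Editor's note] The "Exploring compaction algorithm has selected 3 files of size 87248 ... 1 in ratio" line above refers to a ratio test over candidate file sets: a set is "in ratio" when no single file is larger than the combined size of the rest times the ratio (1.200000 per the CompactionConfiguration line later in this log). A rough sketch, where 44978 for the first file is inferred from the 87248 total minus the two block sizes reported above:

    final class InRatioSketch {
      static boolean filesInRatio(long[] sizes, double ratio) {
        long total = 0;
        for (long s : sizes) total += s;
        for (long s : sizes) {
          // A file that dwarfs the rest of the set breaks the ratio.
          if (s > (total - s) * ratio) return false;
        }
        return true;
      }

      public static void main(String[] args) {
        // The three files selected above: 44978 + 29761 + 12509 = 87248 bytes.
        System.out.println(filesInRatio(new long[] {44978L, 29761L, 12509L}, 1.2)); // true
      }
    }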
2024-12-13T17:22:48,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741845_1021 (size=24376)
2024-12-13T17:22:48,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741845_1021 (size=24376)
2024-12-13T17:22:48,383 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=105 (bloomFilter=true), to=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/.tmp/info/cdd6e29d515942cba2369ccb9c50fe2f
2024-12-13T17:22:48,389 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): db80f3d14c7da4090fc6ab00d964b5ae#info#compaction#48 average throughput is 22.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-13T17:22:48,390 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/.tmp/info/e0a05809d54c4c938a8f1d1eef386c1f is 1080, key is row0001/info:/1734110544128/Put/seqid=0
2024-12-13T17:22:48,394 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/.tmp/info/cdd6e29d515942cba2369ccb9c50fe2f as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/cdd6e29d515942cba2369ccb9c50fe2f
2024-12-13T17:22:48,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741846_1022 (size=77532)
2024-12-13T17:22:48,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741846_1022 (size=77532)
2024-12-13T17:22:48,400 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/cdd6e29d515942cba2369ccb9c50fe2f, entries=18, sequenceid=105, filesize=23.8 K
2024-12-13T17:22:48,401 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=11.56 KB/11836 for db80f3d14c7da4090fc6ab00d964b5ae in 30ms, sequenceid=105, compaction requested=false
2024-12-13T17:22:48,401 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for db80f3d14c7da4090fc6ab00d964b5ae:
2024-12-13T17:22:48,401 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=109.0 K, sizeToCheck=16.0 K
2024-12-13T17:22:48,401 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-13T17:22:48,401 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/0e39c8c6d4ab46f9857dc85754e1a2e3 because midkey is the same as first or last row
2024-12-13T17:22:48,404 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/.tmp/info/e0a05809d54c4c938a8f1d1eef386c1f as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/e0a05809d54c4c938a8f1d1eef386c1f
2024-12-13T17:22:48,411 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in db80f3d14c7da4090fc6ab00d964b5ae/info of db80f3d14c7da4090fc6ab00d964b5ae into e0a05809d54c4c938a8f1d1eef386c1f(size=75.7 K), total size for store is 99.5 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-13T17:22:48,411 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db80f3d14c7da4090fc6ab00d964b5ae:
2024-12-13T17:22:48,411 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1734110534112.db80f3d14c7da4090fc6ab00d964b5ae., storeName=db80f3d14c7da4090fc6ab00d964b5ae/info, priority=13, startTime=1734110568370; duration=0sec
2024-12-13T17:22:48,411 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=99.5 K, sizeToCheck=16.0 K
2024-12-13T17:22:48,411 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-13T17:22:48,412 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1734110534112.db80f3d14c7da4090fc6ab00d964b5ae., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-13T17:22:48,412 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-13T17:22:48,412 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db80f3d14c7da4090fc6ab00d964b5ae:info
2024-12-13T17:22:48,413 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35795 {}] assignment.AssignmentManager(1346): Split request from bcc0a2319104,35103,1734110532467, parent={ENCODED => db80f3d14c7da4090fc6ab00d964b5ae, NAME => 'TestLogRolling-testLogRolling,,1734110534112.db80f3d14c7da4090fc6ab00d964b5ae.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062
2024-12-13T17:22:48,418 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35795 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=bcc0a2319104,35103,1734110532467
2024-12-13T17:22:48,422 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35795 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=db80f3d14c7da4090fc6ab00d964b5ae, daughterA=8c3bc8ab6810cb0271b72345588a17d4, daughterB=73f05a02f16845d2468f6082d7912f66
2024-12-13T17:22:48,423 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=12, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=db80f3d14c7da4090fc6ab00d964b5ae, daughterA=8c3bc8ab6810cb0271b72345588a17d4, daughterB=73f05a02f16845d2468f6082d7912f66
2024-12-13T17:22:48,423 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=12, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=db80f3d14c7da4090fc6ab00d964b5ae, daughterA=8c3bc8ab6810cb0271b72345588a17d4, daughterB=73f05a02f16845d2468f6082d7912f66
2024-12-13T17:22:48,423 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=12, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=db80f3d14c7da4090fc6ab00d964b5ae, daughterA=8c3bc8ab6810cb0271b72345588a17d4, daughterB=73f05a02f16845d2468f6082d7912f66
2024-12-13T17:22:48,429 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=db80f3d14c7da4090fc6ab00d964b5ae, UNASSIGN}]
2024-12-13T17:22:48,430 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=db80f3d14c7da4090fc6ab00d964b5ae, UNASSIGN
2024-12-13T17:22:48,431 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=db80f3d14c7da4090fc6ab00d964b5ae, regionState=CLOSING, regionLocation=bcc0a2319104,35103,1734110532467
2024-12-13T17:22:48,433 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false
2024-12-13T17:22:48,433 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE; CloseRegionProcedure db80f3d14c7da4090fc6ab00d964b5ae, server=bcc0a2319104,35103,1734110532467}]
2024-12-13T17:22:48,589 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bcc0a2319104,35103,1734110532467
2024-12-13T17:22:48,591 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] handler.UnassignRegionHandler(124): Close db80f3d14c7da4090fc6ab00d964b5ae
2024-12-13T17:22:48,592 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] handler.UnassignRegionHandler(138): Unassign region: split region: true: evictCache: true
2024-12-13T17:22:48,592 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1681): Closing db80f3d14c7da4090fc6ab00d964b5ae, disabling compactions & flushes
2024-12-13T17:22:48,593 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRolling,,1734110534112.db80f3d14c7da4090fc6ab00d964b5ae.
2024-12-13T17:22:48,593 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRolling,,1734110534112.db80f3d14c7da4090fc6ab00d964b5ae.
2024-12-13T17:22:48,593 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRolling,,1734110534112.db80f3d14c7da4090fc6ab00d964b5ae. after waiting 0 ms
2024-12-13T17:22:48,593 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRolling,,1734110534112.db80f3d14c7da4090fc6ab00d964b5ae.
2024-12-13T17:22:48,593 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(2837): Flushing db80f3d14c7da4090fc6ab00d964b5ae 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB
2024-12-13T17:22:48,601 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/.tmp/info/b6fd3a5dd5e24b1cb16573153f20187b is 1080, key is row0086/info:/1734110568372/Put/seqid=0
2024-12-13T17:22:48,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741847_1023 (size=16817)
2024-12-13T17:22:48,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741847_1023 (size=16817)
2024-12-13T17:22:48,606 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/.tmp/info/b6fd3a5dd5e24b1cb16573153f20187b
2024-12-13T17:22:48,610 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/.tmp/info/b6fd3a5dd5e24b1cb16573153f20187b as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/b6fd3a5dd5e24b1cb16573153f20187b
2024-12-13T17:22:48,616 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/b6fd3a5dd5e24b1cb16573153f20187b, entries=11, sequenceid=120, filesize=16.4 K
2024-12-13T17:22:48,617 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(3040): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=0 B/0 for db80f3d14c7da4090fc6ab00d964b5ae in 24ms, sequenceid=120, compaction requested=true
2024-12-13T17:22:48,618 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1734110534112.db80f3d14c7da4090fc6ab00d964b5ae.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/6089e821cb6c402a8b10d4f38a026bf4, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/cf1b106a3d0946338baa65332f048935, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/0e39c8c6d4ab46f9857dc85754e1a2e3, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/a7d93810b73945b29f0b8c2b15a56920, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/d5e346a78732490099a7b3422ea2703d, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/5f3a30d499cd423aba0a9f24063c4738] to archive
2024-12-13T17:22:48,619 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1734110534112.db80f3d14c7da4090fc6ab00d964b5ae.-1 {}] backup.HFileArchiver(363): Archiving compacted files.
2024-12-13T17:22:48,621 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/6089e821cb6c402a8b10d4f38a026bf4 to hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/archive/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/6089e821cb6c402a8b10d4f38a026bf4
2024-12-13T17:22:48,621 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/cf1b106a3d0946338baa65332f048935 to hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/archive/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/cf1b106a3d0946338baa65332f048935
2024-12-13T17:22:48,622 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/0e39c8c6d4ab46f9857dc85754e1a2e3 to hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/archive/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/0e39c8c6d4ab46f9857dc85754e1a2e3
2024-12-13T17:22:48,622 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/a7d93810b73945b29f0b8c2b15a56920 to hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/archive/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/a7d93810b73945b29f0b8c2b15a56920
2024-12-13T17:22:48,622 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/d5e346a78732490099a7b3422ea2703d to hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/archive/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/d5e346a78732490099a7b3422ea2703d
2024-12-13T17:22:48,622 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/5f3a30d499cd423aba0a9f24063c4738 to hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/archive/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/5f3a30d499cd423aba0a9f24063c4738
2024-12-13T17:22:48,626 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/recovered.edits/123.seqid, newMaxSeqId=123, maxSeqId=1
2024-12-13T17:22:48,627 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRolling,,1734110534112.db80f3d14c7da4090fc6ab00d964b5ae.
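[Editor's note] The HFileArchiver lines above show the same relative path reappearing under archive/: compacted store files are moved, not deleted, by mirroring data/<table>/<region>/<family>/<file> beneath the archive directory. A small sketch of that mapping, assuming the store file path begins with the root directory; the helper name is illustrative:

    import org.apache.hadoop.fs.Path;

    final class ArchivePathSketch {
      // e.g. <root>/data/default/T/region/info/f -> <root>/archive/data/default/T/region/info/f
      static Path archivePath(Path rootDir, Path storeFile) {
        String root = rootDir.toString();
        // Relative part after the root, e.g. "data/default/T/region/info/f".
        String rel = storeFile.toString().substring(root.length() + 1);
        return new Path(new Path(rootDir, "archive"), rel);
      }
    }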
2024-12-13T17:22:48,627 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1635): Region close journal for db80f3d14c7da4090fc6ab00d964b5ae:
2024-12-13T17:22:48,628 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] handler.UnassignRegionHandler(170): Closed db80f3d14c7da4090fc6ab00d964b5ae
2024-12-13T17:22:48,629 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=db80f3d14c7da4090fc6ab00d964b5ae, regionState=CLOSED
2024-12-13T17:22:48,631 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=14, resume processing ppid=13
2024-12-13T17:22:48,631 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, ppid=13, state=SUCCESS; CloseRegionProcedure db80f3d14c7da4090fc6ab00d964b5ae, server=bcc0a2319104,35103,1734110532467 in 197 msec
2024-12-13T17:22:48,632 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12
2024-12-13T17:22:48,633 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=db80f3d14c7da4090fc6ab00d964b5ae, UNASSIGN in 202 msec
2024-12-13T17:22:48,639 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:22:48,641 INFO [PEWorker-5 {}] assignment.SplitTableRegionProcedure(728): pid=12 splitting 3 storefiles, region=db80f3d14c7da4090fc6ab00d964b5ae, threads=3
2024-12-13T17:22:48,641 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=12 splitting started for store file: hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/b6fd3a5dd5e24b1cb16573153f20187b for region: db80f3d14c7da4090fc6ab00d964b5ae
2024-12-13T17:22:48,641 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=12 splitting started for store file: hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/cdd6e29d515942cba2369ccb9c50fe2f for region: db80f3d14c7da4090fc6ab00d964b5ae
2024-12-13T17:22:48,642 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=12 splitting started for store file: hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/e0a05809d54c4c938a8f1d1eef386c1f for region: db80f3d14c7da4090fc6ab00d964b5ae
2024-12-13T17:22:48,650 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/cdd6e29d515942cba2369ccb9c50fe2f, top=true
2024-12-13T17:22:48,650 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/b6fd3a5dd5e24b1cb16573153f20187b, top=true
2024-12-13T17:22:48,655 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/TestLogRolling-testLogRolling=db80f3d14c7da4090fc6ab00d964b5ae-b6fd3a5dd5e24b1cb16573153f20187b for child: 73f05a02f16845d2468f6082d7912f66, parent: db80f3d14c7da4090fc6ab00d964b5ae
2024-12-13T17:22:48,655 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/TestLogRolling-testLogRolling=db80f3d14c7da4090fc6ab00d964b5ae-cdd6e29d515942cba2369ccb9c50fe2f for child: 73f05a02f16845d2468f6082d7912f66, parent: db80f3d14c7da4090fc6ab00d964b5ae
2024-12-13T17:22:48,655 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=12 splitting complete for store file: hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/cdd6e29d515942cba2369ccb9c50fe2f for region: db80f3d14c7da4090fc6ab00d964b5ae
2024-12-13T17:22:48,655 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=12 splitting complete for store file: hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/b6fd3a5dd5e24b1cb16573153f20187b for region: db80f3d14c7da4090fc6ab00d964b5ae
2024-12-13T17:22:48,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741848_1024 (size=27)
2024-12-13T17:22:48,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741848_1024 (size=27)
2024-12-13T17:22:48,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741849_1025 (size=27)
2024-12-13T17:22:48,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741849_1025 (size=27)
2024-12-13T17:22:48,666 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=12 splitting complete for store file: hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/e0a05809d54c4c938a8f1d1eef386c1f for region: db80f3d14c7da4090fc6ab00d964b5ae
2024-12-13T17:22:48,666 DEBUG [PEWorker-5 {}] assignment.SplitTableRegionProcedure(802): pid=12 split storefiles for region db80f3d14c7da4090fc6ab00d964b5ae Daughter A: [hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/8c3bc8ab6810cb0271b72345588a17d4/info/e0a05809d54c4c938a8f1d1eef386c1f.db80f3d14c7da4090fc6ab00d964b5ae] storefiles, Daughter B: [hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/TestLogRolling-testLogRolling=db80f3d14c7da4090fc6ab00d964b5ae-b6fd3a5dd5e24b1cb16573153f20187b, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/TestLogRolling-testLogRolling=db80f3d14c7da4090fc6ab00d964b5ae-cdd6e29d515942cba2369ccb9c50fe2f, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/e0a05809d54c4c938a8f1d1eef386c1f.db80f3d14c7da4090fc6ab00d964b5ae] storefiles.
2024-12-13T17:22:48,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741850_1026 (size=71)
2024-12-13T17:22:48,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741850_1026 (size=71)
2024-12-13T17:22:48,675 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:22:48,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741851_1027 (size=71)
2024-12-13T17:22:48,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741851_1027 (size=71)
2024-12-13T17:22:48,688 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:22:48,697 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/8c3bc8ab6810cb0271b72345588a17d4/recovered.edits/123.seqid, newMaxSeqId=123, maxSeqId=-1
2024-12-13T17:22:48,699 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/recovered.edits/123.seqid, newMaxSeqId=123, maxSeqId=-1
2024-12-13T17:22:48,701 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1734110534112.db80f3d14c7da4090fc6ab00d964b5ae.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1734110568701"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1734110568701"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1734110568701"}]},"ts":"1734110568701"}
2024-12-13T17:22:48,701 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1734110568418.8c3bc8ab6810cb0271b72345588a17d4.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1734110568701"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734110568701"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1734110568701"}]},"ts":"1734110568701"}
2024-12-13T17:22:48,701 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1734110568701"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734110568701"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1734110568701"}]},"ts":"1734110568701"}
2024-12-13T17:22:48,724 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35103 {}] regionserver.HRegion(8581): Flush requested on 1588230740
2024-12-13T17:22:48,724 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all.
2024-12-13T17:22:48,724 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=4.75 KB heapSize=8.29 KB
2024-12-13T17:22:48,728 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8c3bc8ab6810cb0271b72345588a17d4, ASSIGN}, {pid=16, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=73f05a02f16845d2468f6082d7912f66, ASSIGN}]
2024-12-13T17:22:48,729 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=16, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=73f05a02f16845d2468f6082d7912f66, ASSIGN
2024-12-13T17:22:48,729 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=15, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8c3bc8ab6810cb0271b72345588a17d4, ASSIGN
2024-12-13T17:22:48,730 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=16, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=73f05a02f16845d2468f6082d7912f66, ASSIGN; state=SPLITTING_NEW, location=bcc0a2319104,35103,1734110532467; forceNewPlan=false, retain=false
2024-12-13T17:22:48,730 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=15, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8c3bc8ab6810cb0271b72345588a17d4, ASSIGN; state=SPLITTING_NEW, location=bcc0a2319104,35103,1734110532467; forceNewPlan=false, retain=false
2024-12-13T17:22:48,739 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/hbase/meta/1588230740/.tmp/info/f3c5dad956354a6f911909392b2d3473 is 193, key is TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66./info:regioninfo/1734110568701/Put/seqid=0
2024-12-13T17:22:48,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741852_1028 (size=9423)
2024-12-13T17:22:48,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741852_1028 (size=9423)
2024-12-13T17:22:48,744 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.54 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/hbase/meta/1588230740/.tmp/info/f3c5dad956354a6f911909392b2d3473
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.54 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/hbase/meta/1588230740/.tmp/info/f3c5dad956354a6f911909392b2d3473 2024-12-13T17:22:48,762 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/hbase/meta/1588230740/.tmp/table/9389450f679f48f393f3149bdb270663 is 65, key is TestLogRolling-testLogRolling/table:state/1734110534491/Put/seqid=0 2024-12-13T17:22:48,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741853_1029 (size=5412) 2024-12-13T17:22:48,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741853_1029 (size=5412) 2024-12-13T17:22:48,767 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=216 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/hbase/meta/1588230740/.tmp/table/9389450f679f48f393f3149bdb270663 2024-12-13T17:22:48,768 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:22:48,773 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/hbase/meta/1588230740/.tmp/info/f3c5dad956354a6f911909392b2d3473 as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/hbase/meta/1588230740/info/f3c5dad956354a6f911909392b2d3473 2024-12-13T17:22:48,777 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/hbase/meta/1588230740/info/f3c5dad956354a6f911909392b2d3473, entries=29, sequenceid=17, filesize=9.2 K 2024-12-13T17:22:48,778 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/hbase/meta/1588230740/.tmp/table/9389450f679f48f393f3149bdb270663 as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/hbase/meta/1588230740/table/9389450f679f48f393f3149bdb270663 2024-12-13T17:22:48,782 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/hbase/meta/1588230740/table/9389450f679f48f393f3149bdb270663, entries=4, sequenceid=17, filesize=5.3 K 2024-12-13T17:22:48,783 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~4.75 KB/4869, heapSize ~8.01 KB/8200, currentSize=0 B/0 for 1588230740 in 59ms, sequenceid=17, compaction requested=false 2024-12-13T17:22:48,783 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1588230740: 2024-12-13T17:22:48,804 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-13T17:22:48,880 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=15 updating hbase:meta row=8c3bc8ab6810cb0271b72345588a17d4, regionState=OPENING, regionLocation=bcc0a2319104,35103,1734110532467
2024-12-13T17:22:48,880 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=16 updating hbase:meta row=73f05a02f16845d2468f6082d7912f66, regionState=OPENING, regionLocation=bcc0a2319104,35103,1734110532467
2024-12-13T17:22:48,884 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=15, state=RUNNABLE; OpenRegionProcedure 8c3bc8ab6810cb0271b72345588a17d4, server=bcc0a2319104,35103,1734110532467}]
2024-12-13T17:22:48,887 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=18, ppid=16, state=RUNNABLE; OpenRegionProcedure 73f05a02f16845d2468f6082d7912f66, server=bcc0a2319104,35103,1734110532467}]
2024-12-13T17:22:48,981 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-13T17:22:49,039 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bcc0a2319104,35103,1734110532467
2024-12-13T17:22:49,047 INFO [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66.
2024-12-13T17:22:49,047 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(7285): Opening region: {ENCODED => 73f05a02f16845d2468f6082d7912f66, NAME => 'TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66.', STARTKEY => 'row0062', ENDKEY => ''}
2024-12-13T17:22:49,048 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 73f05a02f16845d2468f6082d7912f66
2024-12-13T17:22:49,048 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:22:49,048 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(7327): checking encryption for 73f05a02f16845d2468f6082d7912f66
2024-12-13T17:22:49,048 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(7330): checking classloading for 73f05a02f16845d2468f6082d7912f66
2024-12-13T17:22:49,051 INFO [StoreOpener-73f05a02f16845d2468f6082d7912f66-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 73f05a02f16845d2468f6082d7912f66
2024-12-13T17:22:49,053 INFO [StoreOpener-73f05a02f16845d2468f6082d7912f66-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 73f05a02f16845d2468f6082d7912f66 columnFamilyName info
2024-12-13T17:22:49,053 DEBUG [StoreOpener-73f05a02f16845d2468f6082d7912f66-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:22:49,063 DEBUG [StoreOpener-73f05a02f16845d2468f6082d7912f66-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/TestLogRolling-testLogRolling=db80f3d14c7da4090fc6ab00d964b5ae-b6fd3a5dd5e24b1cb16573153f20187b
2024-12-13T17:22:49,069 DEBUG [StoreOpener-73f05a02f16845d2468f6082d7912f66-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/TestLogRolling-testLogRolling=db80f3d14c7da4090fc6ab00d964b5ae-cdd6e29d515942cba2369ccb9c50fe2f
2024-12-13T17:22:49,079 DEBUG [StoreOpener-73f05a02f16845d2468f6082d7912f66-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/e0a05809d54c4c938a8f1d1eef386c1f.db80f3d14c7da4090fc6ab00d964b5ae->hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/e0a05809d54c4c938a8f1d1eef386c1f-top
2024-12-13T17:22:49,079 INFO [StoreOpener-73f05a02f16845d2468f6082d7912f66-1 {}] regionserver.HStore(327): Store=73f05a02f16845d2468f6082d7912f66/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-13T17:22:49,081 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66
2024-12-13T17:22:49,082 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66
2024-12-13T17:22:49,085 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(1085): writing seq id for 73f05a02f16845d2468f6082d7912f66
2024-12-13T17:22:49,086 INFO [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(1102): Opened 73f05a02f16845d2468f6082d7912f66; next sequenceid=124; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=740859, jitterRate=-0.05794923007488251}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-13T17:22:49,087 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(1001): Region open journal for 73f05a02f16845d2468f6082d7912f66:
2024-12-13T17:22:49,088 INFO [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66., pid=18, masterSystemTime=1734110569039
2024-12-13T17:22:49,088 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.CompactSplit(403): Add compact mark for store 73f05a02f16845d2468f6082d7912f66:info, priority=-2147483648, current under compaction store size is 1
2024-12-13T17:22:49,088 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-13T17:22:49,088 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-13T17:22:49,089 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HStore(1526): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66.
2024-12-13T17:22:49,089 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HStore(1540): 73f05a02f16845d2468f6082d7912f66/info is initiating minor compaction (all files)
2024-12-13T17:22:49,089 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73f05a02f16845d2468f6082d7912f66/info in TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66.
2024-12-13T17:22:49,089 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66.
2024-12-13T17:22:49,089 INFO [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66.
2024-12-13T17:22:49,090 INFO [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRolling,,1734110568418.8c3bc8ab6810cb0271b72345588a17d4.
2024-12-13T17:22:49,090 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/e0a05809d54c4c938a8f1d1eef386c1f.db80f3d14c7da4090fc6ab00d964b5ae->hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/e0a05809d54c4c938a8f1d1eef386c1f-top, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/TestLogRolling-testLogRolling=db80f3d14c7da4090fc6ab00d964b5ae-cdd6e29d515942cba2369ccb9c50fe2f, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/TestLogRolling-testLogRolling=db80f3d14c7da4090fc6ab00d964b5ae-b6fd3a5dd5e24b1cb16573153f20187b] into tmpdir=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp, totalSize=115.9 K
2024-12-13T17:22:49,090 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7285): Opening region: {ENCODED => 8c3bc8ab6810cb0271b72345588a17d4, NAME => 'TestLogRolling-testLogRolling,,1734110568418.8c3bc8ab6810cb0271b72345588a17d4.', STARTKEY => '', ENDKEY => 'row0062'}
2024-12-13T17:22:49,090 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 8c3bc8ab6810cb0271b72345588a17d4
2024-12-13T17:22:49,090 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRolling,,1734110568418.8c3bc8ab6810cb0271b72345588a17d4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:22:49,090 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=16 updating hbase:meta row=73f05a02f16845d2468f6082d7912f66, regionState=OPEN, openSeqNum=124, regionLocation=bcc0a2319104,35103,1734110532467
2024-12-13T17:22:49,090 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7327): checking encryption for 8c3bc8ab6810cb0271b72345588a17d4
2024-12-13T17:22:49,090 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7330): checking classloading for 8c3bc8ab6810cb0271b72345588a17d4
2024-12-13T17:22:49,090 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.Compactor(224): Compacting e0a05809d54c4c938a8f1d1eef386c1f.db80f3d14c7da4090fc6ab00d964b5ae, keycount=33, bloomtype=ROW, size=75.7 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1734110544128
2024-12-13T17:22:49,091 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.Compactor(224): Compacting TestLogRolling-testLogRolling=db80f3d14c7da4090fc6ab00d964b5ae-cdd6e29d515942cba2369ccb9c50fe2f, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=105, earliestPutTs=1734110568348
2024-12-13T17:22:49,091 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.Compactor(224): Compacting TestLogRolling-testLogRolling=db80f3d14c7da4090fc6ab00d964b5ae-b6fd3a5dd5e24b1cb16573153f20187b, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1734110568372
2024-12-13T17:22:49,091 INFO [StoreOpener-8c3bc8ab6810cb0271b72345588a17d4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 8c3bc8ab6810cb0271b72345588a17d4
2024-12-13T17:22:49,092 INFO [StoreOpener-8c3bc8ab6810cb0271b72345588a17d4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8c3bc8ab6810cb0271b72345588a17d4 columnFamilyName info
2024-12-13T17:22:49,092 DEBUG [StoreOpener-8c3bc8ab6810cb0271b72345588a17d4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:22:49,093 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=18, resume processing ppid=16
2024-12-13T17:22:49,093 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, ppid=16, state=SUCCESS; OpenRegionProcedure 73f05a02f16845d2468f6082d7912f66, server=bcc0a2319104,35103,1734110532467 in 206 msec
2024-12-13T17:22:49,094 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=73f05a02f16845d2468f6082d7912f66, ASSIGN in 365 msec
2024-12-13T17:22:49,101 DEBUG [StoreOpener-8c3bc8ab6810cb0271b72345588a17d4-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/8c3bc8ab6810cb0271b72345588a17d4/info/e0a05809d54c4c938a8f1d1eef386c1f.db80f3d14c7da4090fc6ab00d964b5ae->hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/e0a05809d54c4c938a8f1d1eef386c1f-bottom
2024-12-13T17:22:49,101 INFO [StoreOpener-8c3bc8ab6810cb0271b72345588a17d4-1 {}] regionserver.HStore(327): Store=8c3bc8ab6810cb0271b72345588a17d4/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-13T17:22:49,102 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/8c3bc8ab6810cb0271b72345588a17d4
2024-12-13T17:22:49,103 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/8c3bc8ab6810cb0271b72345588a17d4
2024-12-13T17:22:49,105 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1085): writing seq id for 8c3bc8ab6810cb0271b72345588a17d4
2024-12-13T17:22:49,106 INFO [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1102): Opened 8c3bc8ab6810cb0271b72345588a17d4; next sequenceid=124; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=716071, jitterRate=-0.08946871757507324}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-13T17:22:49,106 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1001): Region open journal for 8c3bc8ab6810cb0271b72345588a17d4:
2024-12-13T17:22:49,107 INFO [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRolling,,1734110568418.8c3bc8ab6810cb0271b72345588a17d4., pid=17, masterSystemTime=1734110569039
2024-12-13T17:22:49,107 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.CompactSplit(403): Add compact mark for store 8c3bc8ab6810cb0271b72345588a17d4:info, priority=-2147483648, current under compaction store size is 2
2024-12-13T17:22:49,107 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-13T17:22:49,107 DEBUG [RS:0;bcc0a2319104:35103-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking
2024-12-13T17:22:49,107 INFO [RS:0;bcc0a2319104:35103-longCompactions-0 {}] regionserver.HStore(1526): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1734110568418.8c3bc8ab6810cb0271b72345588a17d4.
2024-12-13T17:22:49,107 DEBUG [RS:0;bcc0a2319104:35103-longCompactions-0 {}] regionserver.HStore(1540): 8c3bc8ab6810cb0271b72345588a17d4/info is initiating minor compaction (all files)
2024-12-13T17:22:49,107 INFO [RS:0;bcc0a2319104:35103-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8c3bc8ab6810cb0271b72345588a17d4/info in TestLogRolling-testLogRolling,,1734110568418.8c3bc8ab6810cb0271b72345588a17d4.
2024-12-13T17:22:49,108 INFO [RS:0;bcc0a2319104:35103-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/8c3bc8ab6810cb0271b72345588a17d4/info/e0a05809d54c4c938a8f1d1eef386c1f.db80f3d14c7da4090fc6ab00d964b5ae->hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/e0a05809d54c4c938a8f1d1eef386c1f-bottom] into tmpdir=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/8c3bc8ab6810cb0271b72345588a17d4/.tmp, totalSize=75.7 K
2024-12-13T17:22:49,108 DEBUG [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRolling,,1734110568418.8c3bc8ab6810cb0271b72345588a17d4.
2024-12-13T17:22:49,108 INFO [RS_OPEN_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRolling,,1734110568418.8c3bc8ab6810cb0271b72345588a17d4.
2024-12-13T17:22:49,108 DEBUG [RS:0;bcc0a2319104:35103-longCompactions-0 {}] compactions.Compactor(224): Compacting e0a05809d54c4c938a8f1d1eef386c1f.db80f3d14c7da4090fc6ab00d964b5ae, keycount=33, bloomtype=ROW, size=75.7 K, encoding=NONE, compression=NONE, seqNum=84, earliestPutTs=1734110544128
2024-12-13T17:22:49,109 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=15 updating hbase:meta row=8c3bc8ab6810cb0271b72345588a17d4, regionState=OPEN, openSeqNum=124, regionLocation=bcc0a2319104,35103,1734110532467
2024-12-13T17:22:49,112 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=15
2024-12-13T17:22:49,112 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=15, state=SUCCESS; OpenRegionProcedure 8c3bc8ab6810cb0271b72345588a17d4, server=bcc0a2319104,35103,1734110532467 in 226 msec
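The two "Opened ...; next sequenceid=124" records above report desiredMaxFileSize=740859 (jitterRate=-0.05794923007488251) for one daughter region and desiredMaxFileSize=716071 (jitterRate=-0.08946871757507324) for the other. Both values are consistent with a common base of 786,432 bytes (768 KB) scaled by (1 + jitterRate); treating that base as the configured split size for this test is an assumption, but the arithmetic checks out:

    // Reproduces the jittered split sizes reported above. The 786,432-byte base
    // (768 KB) is an assumption inferred from the two logged values; the jitter
    // rates are copied verbatim from the log.
    final class SplitSizeJitter {
        public static void main(String[] args) {
            long base = 786_432L;
            double[] jitterRates = {-0.05794923007488251, -0.08946871757507324};
            for (double jitter : jitterRates) {
                // prints 740859 and 716071, matching the two daughter regions
                System.out.printf("desiredMaxFileSize = %.0f%n", base * (1 + jitter));
            }
        }
    }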
2024-12-13T17:22:49,113 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73f05a02f16845d2468f6082d7912f66#info#compaction#52 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-13T17:22:49,114 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/493295dd7e654643a97bce0254c1944b is 1080, key is row0062/info:/1734110566332/Put/seqid=0
2024-12-13T17:22:49,114 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=12
2024-12-13T17:22:49,114 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8c3bc8ab6810cb0271b72345588a17d4, ASSIGN in 384 msec
2024-12-13T17:22:49,116 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=db80f3d14c7da4090fc6ab00d964b5ae, daughterA=8c3bc8ab6810cb0271b72345588a17d4, daughterB=73f05a02f16845d2468f6082d7912f66 in 696 msec
2024-12-13T17:22:49,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741854_1030 (size=42984)
2024-12-13T17:22:49,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741854_1030 (size=42984)
2024-12-13T17:22:49,126 INFO [RS:0;bcc0a2319104:35103-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8c3bc8ab6810cb0271b72345588a17d4#info#compaction#53 average throughput is 31.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-13T17:22:49,126 DEBUG [RS:0;bcc0a2319104:35103-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/8c3bc8ab6810cb0271b72345588a17d4/.tmp/info/1d96a68dfdee4f11a545bff601cfcb20 is 1080, key is row0001/info:/1734110544128/Put/seqid=0
2024-12-13T17:22:49,128 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/493295dd7e654643a97bce0254c1944b as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/493295dd7e654643a97bce0254c1944b
2024-12-13T17:22:49,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741855_1031 (size=70862)
2024-12-13T17:22:49,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741855_1031 (size=70862)
2024-12-13T17:22:49,134 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling'
2024-12-13T17:22:49,135 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73f05a02f16845d2468f6082d7912f66/info of 73f05a02f16845d2468f6082d7912f66 into 493295dd7e654643a97bce0254c1944b(size=42.0 K), total size for store is 42.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-13T17:22:49,135 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73f05a02f16845d2468f6082d7912f66:
2024-12-13T17:22:49,135 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66., storeName=73f05a02f16845d2468f6082d7912f66/info, priority=13, startTime=1734110569088; duration=0sec
2024-12-13T17:22:49,135 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-13T17:22:49,135 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73f05a02f16845d2468f6082d7912f66:info
2024-12-13T17:22:49,136 DEBUG [RS:0;bcc0a2319104:35103-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/8c3bc8ab6810cb0271b72345588a17d4/.tmp/info/1d96a68dfdee4f11a545bff601cfcb20 as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/8c3bc8ab6810cb0271b72345588a17d4/info/1d96a68dfdee4f11a545bff601cfcb20
2024-12-13T17:22:49,141 INFO [RS:0;bcc0a2319104:35103-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 1 (all) file(s) in 8c3bc8ab6810cb0271b72345588a17d4/info of 8c3bc8ab6810cb0271b72345588a17d4 into 1d96a68dfdee4f11a545bff601cfcb20(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-13T17:22:49,141 DEBUG [RS:0;bcc0a2319104:35103-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8c3bc8ab6810cb0271b72345588a17d4:
2024-12-13T17:22:49,141 INFO [RS:0;bcc0a2319104:35103-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1734110568418.8c3bc8ab6810cb0271b72345588a17d4., storeName=8c3bc8ab6810cb0271b72345588a17d4/info, priority=15, startTime=1734110569107; duration=0sec
2024-12-13T17:22:49,141 DEBUG [RS:0;bcc0a2319104:35103-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-13T17:22:49,141 DEBUG [RS:0;bcc0a2319104:35103-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8c3bc8ab6810cb0271b72345588a17d4:info
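For context on the selections above: the ratio 1.200000 printed by CompactionConfiguration is the usual size-ratio test for minor compaction selection, where (roughly) a file stays in a candidate set only if it is no larger than ratio times the sum of the other candidates. The daughter-region compactions here bypass that test ("all files" on region open), which the numbers make visible: the 75.7 K reference file would fail the ratio check against the 23.8 K and 16.4 K files. A simplified standalone sketch of the ratio check itself (an illustration, not the ExploringCompactionPolicy source):

    import java.util.List;

    final class RatioCheck {
        // True if fileSize <= ratio * (sum of the other files in the candidate set).
        static boolean fitsRatio(long fileSize, List<Long> others, double ratio) {
            long sum = others.stream().mapToLong(Long::longValue).sum();
            return fileSize <= ratio * sum;
        }

        public static void main(String[] args) {
            // Approximate byte sizes of the three files compacted above:
            // 75.7 K, 23.8 K, 16.4 K.
            System.out.println(fitsRatio(77_517L, List.of(24_371L, 16_793L), 1.2)); // false
            // The 75.7 K file exceeds 1.2 x (23.8 K + 16.4 K), so an ordinary
            // ratio-based selection would drop it; the "all files" compaction on
            // region open takes it anyway.
        }
    }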
2024-12-13T17:22:49,768 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-13T17:22:49,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-13T17:22:49,982 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-13T17:22:50,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35103 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:37252 deadline: 1734110580389, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1734110534112.db80f3d14c7da4090fc6ab00d964b5ae. is not online on bcc0a2319104,35103,1734110532467
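The NotServingRegionException above is the client-visible side of the split that just finished: a Mutate addressed to the old parent region db80f3d14c7da4090fc6ab00d964b5ae is rejected, after which the client drops its cached location, re-reads hbase:meta, and retries against a daughter region. A hedged sketch of the calling side, with the table name and row taken from the log; the column qualifier and value are placeholders, and the standard HBase client performs the retry internally:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutAcrossSplit {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
                Put put = new Put(Bytes.toBytes("row0062"));
                // qualifier "q" and value "v" are placeholders for illustration
                put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("v"));
                // If the cached location still points at the split parent, the server
                // answers NotServingRegionException; the client refreshes hbase:meta
                // and retries until the call succeeds or the retry budget (the
                // "deadline" field in the log record) is exhausted.
                table.put(put);
            }
        }
    }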
2024-12-13T17:22:50,769 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-13T17:22:50,806 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-13T17:22:50,982 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-13T17:22:51,771 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-13T17:22:51,807 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-13T17:22:51,983 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-13T17:22:52,772 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-13T17:22:52,807 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-13T17:22:52,985 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-13T17:22:53,628 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:53,629 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:53,629 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:53,630 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:53,630 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:53,631 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:53,650 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:53,650 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:53,650 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:53,650 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:53,650 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:53,651 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:53,653 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:53,653 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:53,653 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:53,655 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:53,808 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:53,985 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:22:53,986 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=3 on file=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 after 196213ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor198.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:22:54,140 INFO [master/bcc0a2319104:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-13T17:22:54,140 INFO [master/bcc0a2319104:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-12-13T17:22:54,163 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-13T17:22:54,164 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:54,165 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:54,165 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:54,166 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:54,166 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:54,167 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:54,187 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:54,187 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:54,187 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:54,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:54,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:54,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:54,191 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:54,191 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:54,191 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:54,193 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-13T17:22:54,598 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 4ae54d53dadb6f7d5f47a16f2daa01e3, had cached 0 bytes from a total of 23930
2024-12-13T17:22:54,774 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:22:54,809 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:22:54,987 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
2024-12-13T17:22:55,775 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:22:55,810 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:22:55,988 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
2024-12-13T17:22:56,776 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:22:56,811 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:22:56,990 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
2024-12-13T17:22:57,777 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:22:57,812 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:22:57,991 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
2024-12-13T17:22:58,365 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1588230740, had cached 0 bytes from a total of 14835
2024-12-13T17:22:58,779 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:22:58,813 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:22:58,991 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
2024-12-13T17:22:59,780 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:22:59,814 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:22:59,992 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
2024-12-13T17:23:00,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35103 {}] regionserver.HRegion(8581): Flush requested on 73f05a02f16845d2468f6082d7912f66
2024-12-13T17:23:00,496 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73f05a02f16845d2468f6082d7912f66 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-13T17:23:00,500 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/b818a57e2d284544965240ad8a1ea871 is 1080, key is row0097/info:/1734110580488/Put/seqid=0
2024-12-13T17:23:00,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741856_1032 (size=12515)
2024-12-13T17:23:00,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741856_1032 (size=12515)
2024-12-13T17:23:00,524 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/b818a57e2d284544965240ad8a1ea871
2024-12-13T17:23:00,529 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/b818a57e2d284544965240ad8a1ea871 as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/b818a57e2d284544965240ad8a1ea871
2024-12-13T17:23:00,534 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/b818a57e2d284544965240ad8a1ea871, entries=7, sequenceid=134, filesize=12.2 K
2024-12-13T17:23:00,535 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=17.86 KB/18292 for 73f05a02f16845d2468f6082d7912f66 in 39ms, sequenceid=134, compaction requested=false
2024-12-13T17:23:00,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73f05a02f16845d2468f6082d7912f66:
2024-12-13T17:23:00,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35103 {}] regionserver.HRegion(8581): Flush requested on 73f05a02f16845d2468f6082d7912f66
2024-12-13T17:23:00,536 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73f05a02f16845d2468f6082d7912f66 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB
2024-12-13T17:23:00,539 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/607d6451eff84b8cb2ba87a9e9d817fa is 1080, key is row0104/info:/1734110580496/Put/seqid=0
2024-12-13T17:23:00,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741857_1033 (size=24394)
2024-12-13T17:23:00,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741857_1033 (size=24394)
2024-12-13T17:23:00,544 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/607d6451eff84b8cb2ba87a9e9d817fa
2024-12-13T17:23:00,549 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/607d6451eff84b8cb2ba87a9e9d817fa as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/607d6451eff84b8cb2ba87a9e9d817fa
2024-12-13T17:23:00,553 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/607d6451eff84b8cb2ba87a9e9d817fa, entries=18, sequenceid=155, filesize=23.8 K
2024-12-13T17:23:00,553 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=7.36 KB/7532 for 73f05a02f16845d2468f6082d7912f66 in 18ms, sequenceid=155, compaction requested=true
2024-12-13T17:23:00,553 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73f05a02f16845d2468f6082d7912f66:
2024-12-13T17:23:00,554 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73f05a02f16845d2468f6082d7912f66:info, priority=-2147483648, current under compaction store size is 1
2024-12-13T17:23:00,554 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-13T17:23:00,554 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-13T17:23:00,554 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 79893 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-13T17:23:00,555 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HStore(1540): 73f05a02f16845d2468f6082d7912f66/info is initiating minor compaction (all files)
2024-12-13T17:23:00,555 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73f05a02f16845d2468f6082d7912f66/info in TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66.
2024-12-13T17:23:00,555 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/493295dd7e654643a97bce0254c1944b, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/b818a57e2d284544965240ad8a1ea871, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/607d6451eff84b8cb2ba87a9e9d817fa] into tmpdir=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp, totalSize=78.0 K
2024-12-13T17:23:00,555 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.Compactor(224): Compacting 493295dd7e654643a97bce0254c1944b, keycount=35, bloomtype=ROW, size=42.0 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1734110566332
2024-12-13T17:23:00,555 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.Compactor(224): Compacting b818a57e2d284544965240ad8a1ea871, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1734110580488
2024-12-13T17:23:00,555 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.Compactor(224): Compacting 607d6451eff84b8cb2ba87a9e9d817fa, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1734110580496
2024-12-13T17:23:00,565 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73f05a02f16845d2468f6082d7912f66#info#compaction#56 average throughput is 30.78 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-13T17:23:00,565 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/680ca5059e4948d797d211ed7e7ee45e is 1080, key is row0062/info:/1734110566332/Put/seqid=0
2024-12-13T17:23:00,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741858_1034 (size=70103)
2024-12-13T17:23:00,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741858_1034 (size=70103)
2024-12-13T17:23:00,574 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/680ca5059e4948d797d211ed7e7ee45e as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/680ca5059e4948d797d211ed7e7ee45e
2024-12-13T17:23:00,579 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73f05a02f16845d2468f6082d7912f66/info of 73f05a02f16845d2468f6082d7912f66 into 680ca5059e4948d797d211ed7e7ee45e(size=68.5 K), total size for store is 68.5 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-13T17:23:00,579 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73f05a02f16845d2468f6082d7912f66:
2024-12-13T17:23:00,579 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66., storeName=73f05a02f16845d2468f6082d7912f66/info, priority=13, startTime=1734110580553; duration=0sec
2024-12-13T17:23:00,579 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-13T17:23:00,579 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73f05a02f16845d2468f6082d7912f66:info
2024-12-13T17:23:00,781 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:23:00,815 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:23:00,994 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:23:01,782 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:23:01,816 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:23:01,995 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:23:02,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35103 {}] regionserver.HRegion(8581): Flush requested on 73f05a02f16845d2468f6082d7912f66
2024-12-13T17:23:02,547 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73f05a02f16845d2468f6082d7912f66 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB
2024-12-13T17:23:02,555 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/ab844e02baca4c82bf8c6108daa8a9fd is 1080, key is row0122/info:/1734110580536/Put/seqid=0
2024-12-13T17:23:02,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741859_1035 (size=13594)
2024-12-13T17:23:02,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741859_1035 (size=13594)
2024-12-13T17:23:02,560 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/ab844e02baca4c82bf8c6108daa8a9fd
2024-12-13T17:23:02,566 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/ab844e02baca4c82bf8c6108daa8a9fd as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/ab844e02baca4c82bf8c6108daa8a9fd
2024-12-13T17:23:02,572 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/ab844e02baca4c82bf8c6108daa8a9fd, entries=8, sequenceid=167, filesize=13.3 K
2024-12-13T17:23:02,573 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=16.81 KB/17216 for 73f05a02f16845d2468f6082d7912f66 in 26ms, sequenceid=167, compaction requested=false
2024-12-13T17:23:02,573 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73f05a02f16845d2468f6082d7912f66:
2024-12-13T17:23:02,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35103 {}] regionserver.HRegion(8581): Flush requested on 73f05a02f16845d2468f6082d7912f66
2024-12-13T17:23:02,574 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73f05a02f16845d2468f6082d7912f66 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB
2024-12-13T17:23:02,578 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/f98dc69e24a54a3abc7060f4c078d8b1 is 1080, key is row0130/info:/1734110582549/Put/seqid=0
2024-12-13T17:23:02,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741860_1036 (size=24394)
2024-12-13T17:23:02,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741860_1036 (size=24394)
2024-12-13T17:23:02,583 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=188 (bloomFilter=true), to=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/f98dc69e24a54a3abc7060f4c078d8b1
2024-12-13T17:23:02,589 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35103 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=73f05a02f16845d2468f6082d7912f66, server=bcc0a2319104,35103,1734110532467
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-13T17:23:02,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35103 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:37252 deadline: 1734110592588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=73f05a02f16845d2468f6082d7912f66, server=bcc0a2319104,35103,1734110532467
2024-12-13T17:23:02,589 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/f98dc69e24a54a3abc7060f4c078d8b1 as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/f98dc69e24a54a3abc7060f4c078d8b1
2024-12-13T17:23:02,594 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/f98dc69e24a54a3abc7060f4c078d8b1, entries=18, sequenceid=188, filesize=23.8 K
2024-12-13T17:23:02,595 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=11.56 KB/11836 for 73f05a02f16845d2468f6082d7912f66 in 21ms, sequenceid=188, compaction requested=true
2024-12-13T17:23:02,595 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73f05a02f16845d2468f6082d7912f66:
2024-12-13T17:23:02,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73f05a02f16845d2468f6082d7912f66:info, priority=-2147483648, current under compaction store size is 1
2024-12-13T17:23:02,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-13T17:23:02,596 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-13T17:23:02,597 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 108091 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-13T17:23:02,597 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HStore(1540): 73f05a02f16845d2468f6082d7912f66/info is initiating minor compaction (all files)
2024-12-13T17:23:02,597 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73f05a02f16845d2468f6082d7912f66/info in TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66.
2024-12-13T17:23:02,597 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/680ca5059e4948d797d211ed7e7ee45e, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/ab844e02baca4c82bf8c6108daa8a9fd, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/f98dc69e24a54a3abc7060f4c078d8b1] into tmpdir=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp, totalSize=105.6 K
2024-12-13T17:23:02,597 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.Compactor(224): Compacting 680ca5059e4948d797d211ed7e7ee45e, keycount=60, bloomtype=ROW, size=68.5 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1734110566332
2024-12-13T17:23:02,598 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.Compactor(224): Compacting ab844e02baca4c82bf8c6108daa8a9fd, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1734110580536
2024-12-13T17:23:02,598 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.Compactor(224): Compacting f98dc69e24a54a3abc7060f4c078d8b1, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=188, earliestPutTs=1734110582549
2024-12-13T17:23:02,610 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73f05a02f16845d2468f6082d7912f66#info#compaction#59 average throughput is 44.12 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-13T17:23:02,610 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/6dd2b472853e45d58e2dcbfe280e7b59 is 1080, key is row0062/info:/1734110566332/Put/seqid=0
2024-12-13T17:23:02,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741861_1037 (size=98310)
2024-12-13T17:23:02,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741861_1037 (size=98310)
2024-12-13T17:23:02,620 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/6dd2b472853e45d58e2dcbfe280e7b59 as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/6dd2b472853e45d58e2dcbfe280e7b59
2024-12-13T17:23:02,626 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73f05a02f16845d2468f6082d7912f66/info of 73f05a02f16845d2468f6082d7912f66 into 6dd2b472853e45d58e2dcbfe280e7b59(size=96.0 K), total size for store is 96.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-13T17:23:02,626 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73f05a02f16845d2468f6082d7912f66:
2024-12-13T17:23:02,626 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66., storeName=73f05a02f16845d2468f6082d7912f66/info, priority=13, startTime=1734110582595; duration=0sec
2024-12-13T17:23:02,626 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-13T17:23:02,626 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73f05a02f16845d2468f6082d7912f66:info
2024-12-13T17:23:02,783 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:23:02,817 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:23:02,996 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:23:03,784 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:23:03,818 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:23:03,997 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:23:04,785 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:23:04,819 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:23:04,998 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:23:05,786 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:23:05,820 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:23:05,999 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:06,787 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:06,821 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:07,000 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:07,788 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:07,822 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:08,000 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:23:08,790 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:08,823 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:09,002 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:09,791 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:09,824 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:10,003 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:10,792 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:10,825 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:23:11,004 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:11,793 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:11,826 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:12,005 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] 
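Why every one of these WARNs reads "InvocationTargetException: null" with the real failure under "Caused by": the frames show RecoverLeaseFSUtils calling DistributedFileSystem.isFileClosed through java.lang.reflect.Method.invoke, and invoke wraps any exception thrown by the target in an InvocationTargetException whose own message is null. A minimal, self-contained JDK sketch of that wrapping (FakeFs is a hypothetical stand-in, not an HBase or Hadoop class):

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Sketch: why a reflective call logs "InvocationTargetException: null"
// while the real error only appears as the nested cause.
public class ReflectiveWrapDemo {

    // Hypothetical stand-in for DistributedFileSystem.isFileClosed(Path):
    // the reflective target throws the same IOException seen in the log.
    public static class FakeFs {
        public boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed");
        }
    }

    public static void main(String[] args) throws Exception {
        Method m = FakeFs.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(new FakeFs(), "/some/wal/file");
        } catch (InvocationTargetException e) {
            // The wrapper's own message is null, hence the log's
            // "InvocationTargetException: null"; the thrown IOException
            // is preserved as the cause ("Caused by: ...").
            System.out.println("outer message: " + e.getMessage());
            System.out.println("cause: " + e.getCause());
        }
    }
}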
2024-12-13T17:23:12,311 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; it might be because your Hadoop version > 3.2.3 or 3.3.4, see HBASE-27595 for details.
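The fixer thread's DEBUG line comes from the same reflective style: the test helper looks up a private field named threadGroup that newer Hadoop releases no longer declare, and it degrades to a log line when the lookup fails. A toy sketch of that guarded-lookup pattern, assuming illustrative names (VersionedService is not a real class):

import java.lang.reflect.Field;

// Sketch: reflective access to a field that may not exist in the
// library version on the classpath, guarded so the failure is logged
// and the fixer simply skips its work.
public class GuardedFieldLookup {

    static class VersionedService {
        // Imagine an older version declared: private ThreadGroup threadGroup;
        // and this version no longer does.
    }

    public static void main(String[] args) {
        try {
            Field f = VersionedService.class.getDeclaredField("threadGroup");
            f.setAccessible(true);
            System.out.println("found field: " + f);
        } catch (NoSuchFieldException e) {
            // Mirrors the DEBUG record above: note the version mismatch, move on.
            System.out.println("NoSuchFieldException: " + e.getMessage()
                + "; field layout changed in this library version, skipping fixer");
        }
    }
}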
2024-12-13T17:23:12,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35103 {}] regionserver.HRegion(8581): Flush requested on 73f05a02f16845d2468f6082d7912f66
2024-12-13T17:23:12,644 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73f05a02f16845d2468f6082d7912f66 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB
2024-12-13T17:23:12,648 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/0d5cabfdc93041889ce27466e6c3c44f is 1080, key is row0148/info:/1734110582574/Put/seqid=0
2024-12-13T17:23:12,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741862_1038 (size=17906)
2024-12-13T17:23:12,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741862_1038 (size=17906)
2024-12-13T17:23:12,654 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/0d5cabfdc93041889ce27466e6c3c44f
2024-12-13T17:23:12,660 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/0d5cabfdc93041889ce27466e6c3c44f as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/0d5cabfdc93041889ce27466e6c3c44f
2024-12-13T17:23:12,664 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/0d5cabfdc93041889ce27466e6c3c44f, entries=12, sequenceid=204, filesize=17.5 K
2024-12-13T17:23:12,665 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=1.05 KB/1076 for 73f05a02f16845d2468f6082d7912f66 in 21ms, sequenceid=204, compaction requested=false
2024-12-13T17:23:12,665 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73f05a02f16845d2468f6082d7912f66:
[The Failed invocation WARN for the meta WAL recurs at 2024-12-13T17:23:12,794 with the identical stack trace, immediately before the attempt summary below.]
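The flush above follows a write-to-temp-then-commit pattern: the new HFile is written in full under the region's .tmp directory, then "Committing ... as ..." moves it into the info store so readers only ever observe a complete file. A local-filesystem sketch of the same pattern, assuming java.nio in place of HDFS (illustrative only, not HRegionFileSystem's actual code):

import java.io.IOException;
import java.nio.file.*;

// Sketch of the ".tmp then commit" pattern visible in the flush records:
// write the whole file under a temp dir, then move it into the live
// directory in a single step.
public class TmpCommitDemo {
    public static void main(String[] args) throws IOException {
        Path storeDir = Files.createTempDirectory("info");
        Path tmpDir = Files.createDirectories(storeDir.resolve(".tmp"));

        // 1. Flush: write the complete file under .tmp first.
        Path tmpFile = tmpDir.resolve("0d5cabfdc93041889ce27466e6c3c44f");
        Files.write(tmpFile, new byte[]{1, 2, 3});

        // 2. Commit: move into the store directory; readers never see a
        //    partially written file (same filesystem, so the move is atomic).
        Path committed = storeDir.resolve(tmpFile.getFileName());
        Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);
        System.out.println("committed " + committed);
    }
}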
2024-12-13T17:23:12,794 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=3 on file=hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta after 196180ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor198.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
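The "attempt=3 ... after 196180ms" record shows the shape of the recovery loop: each failed lease-recovery call is logged with its attempt number and total elapsed time, then retried after a pause. A toy sketch of that retry pattern, with hypothetical names and timings (the real loop lives in RecoverLeaseFSUtils and never succeeds here because the DFSClient is already closed):

import java.io.IOException;

// Sketch of an "attempt=N on file=... after Xms" style retry loop.
public class LeaseRetryDemo {

    // Hypothetical recovery call: fails twice, succeeds on the third try.
    static boolean tryRecover(int attempt) throws IOException {
        if (attempt < 3) throw new IOException("Filesystem closed");
        return true;
    }

    public static void main(String[] args) throws InterruptedException {
        long start = System.currentTimeMillis();
        for (int attempt = 1; ; attempt++) {
            try {
                if (tryRecover(attempt)) break;
            } catch (IOException e) {
                long elapsed = System.currentTimeMillis() - start;
                // Same shape as the WARN above: attempt count plus total elapsed time.
                System.out.printf("attempt=%d failed after %dms: %s%n",
                    attempt, elapsed, e.getMessage());
            }
            Thread.sleep(1000); // pause between attempts, like the ~1 s cadence above
        }
        System.out.println("recovered");
    }
}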
[Failed invocation WARNs with the identical stack trace recur at 2024-12-13T17:23:12,827 for the region server WAL and at 2024-12-13T17:23:13,006 for the MasterData WAL.]
2024-12-13T17:23:13,476 DEBUG [master/bcc0a2319104:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 704a36f7d1b3ea16a3462969bad1e4d2 changed from -1.0 to 0.0, refreshing cache
[Failed invocation WARNs recur again at 2024-12-13T17:23:13,795 (meta WAL), 2024-12-13T17:23:13,828 (region server WAL), and 2024-12-13T17:23:14,007 (MasterData WAL), each with the same stack trace as above.]
2024-12-13T17:23:14,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35103 {}] regionserver.HRegion(8581): Flush requested on 73f05a02f16845d2468f6082d7912f66
2024-12-13T17:23:14,661 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73f05a02f16845d2468f6082d7912f66 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-13T17:23:14,668 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/b4af8ac39e0146508c658818be73b4ba is 1080, key is row0160/info:/1734110592645/Put/seqid=0
2024-12-13T17:23:14,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741863_1039 (size=12516)
2024-12-13T17:23:14,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741863_1039 (size=12516)
2024-12-13T17:23:14,675 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/b4af8ac39e0146508c658818be73b4ba
2024-12-13T17:23:14,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/b4af8ac39e0146508c658818be73b4ba as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/b4af8ac39e0146508c658818be73b4ba
2024-12-13T17:23:14,686 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/b4af8ac39e0146508c658818be73b4ba, entries=7, sequenceid=214, filesize=12.2 K
2024-12-13T17:23:14,687 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=17.86 KB/18292 for 73f05a02f16845d2468f6082d7912f66 in 26ms, sequenceid=214, compaction requested=true
2024-12-13T17:23:14,687 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73f05a02f16845d2468f6082d7912f66:
2024-12-13T17:23:14,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73f05a02f16845d2468f6082d7912f66:info, priority=-2147483648, current under compaction store size is 1
2024-12-13T17:23:14,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-13T17:23:14,687 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-13T17:23:14,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35103 {}] regionserver.HRegion(8581): Flush requested on 73f05a02f16845d2468f6082d7912f66
2024-12-13T17:23:14,687 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73f05a02f16845d2468f6082d7912f66 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB
2024-12-13T17:23:14,688 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 128732 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-13T17:23:14,688 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HStore(1540): 73f05a02f16845d2468f6082d7912f66/info is initiating minor compaction (all files)
2024-12-13T17:23:14,688 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73f05a02f16845d2468f6082d7912f66/info in TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66.
2024-12-13T17:23:14,688 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/6dd2b472853e45d58e2dcbfe280e7b59, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/0d5cabfdc93041889ce27466e6c3c44f, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/b4af8ac39e0146508c658818be73b4ba] into tmpdir=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp, totalSize=125.7 K
2024-12-13T17:23:14,689 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6dd2b472853e45d58e2dcbfe280e7b59, keycount=86, bloomtype=ROW, size=96.0 K, encoding=NONE, compression=NONE, seqNum=188, earliestPutTs=1734110566332
2024-12-13T17:23:14,689 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0d5cabfdc93041889ce27466e6c3c44f, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1734110582574
2024-12-13T17:23:14,689 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.Compactor(224): Compacting b4af8ac39e0146508c658818be73b4ba, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1734110592645
2024-12-13T17:23:14,691 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/014dfa59fc2a4e44955c78fd2dfe409e is 1080, key is row0167/info:/1734110594662/Put/seqid=0
2024-12-13T17:23:14,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741864_1040 (size=24394)
2024-12-13T17:23:14,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741864_1040 (size=24394)
2024-12-13T17:23:14,698 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/014dfa59fc2a4e44955c78fd2dfe409e
2024-12-13T17:23:14,703 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73f05a02f16845d2468f6082d7912f66#info#compaction#63 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-13T17:23:14,703 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/014dfa59fc2a4e44955c78fd2dfe409e as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/014dfa59fc2a4e44955c78fd2dfe409e
2024-12-13T17:23:14,703 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/c4d33e682cb340ca87d207bd6591daf1 is 1080, key is row0062/info:/1734110566332/Put/seqid=0
2024-12-13T17:23:14,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741865_1041 (size=118898)
2024-12-13T17:23:14,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741865_1041 (size=118898)
2024-12-13T17:23:14,708 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/014dfa59fc2a4e44955c78fd2dfe409e, entries=18, sequenceid=235, filesize=23.8 K
2024-12-13T17:23:14,709 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=8.41 KB/8608 for 73f05a02f16845d2468f6082d7912f66 in 22ms, sequenceid=235, compaction requested=false
2024-12-13T17:23:14,709 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73f05a02f16845d2468f6082d7912f66:
2024-12-13T17:23:14,710 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/c4d33e682cb340ca87d207bd6591daf1 as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/c4d33e682cb340ca87d207bd6591daf1
2024-12-13T17:23:14,716 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73f05a02f16845d2468f6082d7912f66/info of 73f05a02f16845d2468f6082d7912f66 into c4d33e682cb340ca87d207bd6591daf1(size=116.1 K), total size for store is 139.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-13T17:23:14,716 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73f05a02f16845d2468f6082d7912f66:
2024-12-13T17:23:14,716 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66., storeName=73f05a02f16845d2468f6082d7912f66/info, priority=13, startTime=1734110594687; duration=0sec
2024-12-13T17:23:14,716 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-13T17:23:14,716 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73f05a02f16845d2468f6082d7912f66:info
2024-12-13T17:23:14,796 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
	[identical stack trace omitted; see 17:23:13,795 above]
2024-12-13T17:23:14,829 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
	[identical stack trace omitted; see 17:23:13,795 above]
2024-12-13T17:23:15,008 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
	[identical stack trace omitted; see 17:23:13,795 above]
2024-12-13T17:23:15,796 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
	[identical stack trace omitted; see 17:23:13,795 above]
2024-12-13T17:23:15,829 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
	[identical stack trace omitted; see 17:23:13,795 above]
2024-12-13T17:23:16,009 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
	[identical stack trace omitted; see 17:23:13,795 above]
2024-12-13T17:23:16,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35103 {}] regionserver.HRegion(8581): Flush requested on 73f05a02f16845d2468f6082d7912f66
2024-12-13T17:23:16,700 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73f05a02f16845d2468f6082d7912f66 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB
2024-12-13T17:23:16,705 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/0ba8df0729fe4c8d9a96cd197e9f2f8c is 1080, key is row0185/info:/1734110594688/Put/seqid=0
2024-12-13T17:23:16,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741866_1042 (size=14672)
2024-12-13T17:23:16,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741866_1042 (size=14672)
2024-12-13T17:23:16,713 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/0ba8df0729fe4c8d9a96cd197e9f2f8c
2024-12-13T17:23:16,719 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/0ba8df0729fe4c8d9a96cd197e9f2f8c as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/0ba8df0729fe4c8d9a96cd197e9f2f8c
2024-12-13T17:23:16,724 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/0ba8df0729fe4c8d9a96cd197e9f2f8c, entries=9, sequenceid=248, filesize=14.3 K
2024-12-13T17:23:16,724 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35103 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=73f05a02f16845d2468f6082d7912f66, server=bcc0a2319104,35103,1734110532467
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-13T17:23:16,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35103 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:37252 deadline: 1734110606724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=73f05a02f16845d2468f6082d7912f66, server=bcc0a2319104,35103,1734110532467
2024-12-13T17:23:16,725 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=21.02 KB/21520 for 73f05a02f16845d2468f6082d7912f66 in 25ms, sequenceid=248, compaction requested=true
2024-12-13T17:23:16,725 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73f05a02f16845d2468f6082d7912f66:
2024-12-13T17:23:16,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73f05a02f16845d2468f6082d7912f66:info, priority=-2147483648, current under compaction store size is 1
2024-12-13T17:23:16,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-13T17:23:16,725 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-13T17:23:16,726 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 157964 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-13T17:23:16,726 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HStore(1540): 73f05a02f16845d2468f6082d7912f66/info is initiating minor compaction (all files)
2024-12-13T17:23:16,726 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73f05a02f16845d2468f6082d7912f66/info in TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66.
2024-12-13T17:23:16,726 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/c4d33e682cb340ca87d207bd6591daf1, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/014dfa59fc2a4e44955c78fd2dfe409e, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/0ba8df0729fe4c8d9a96cd197e9f2f8c] into tmpdir=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp, totalSize=154.3 K
2024-12-13T17:23:16,726 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4d33e682cb340ca87d207bd6591daf1, keycount=105, bloomtype=ROW, size=116.1 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1734110566332
2024-12-13T17:23:16,727 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.Compactor(224): Compacting 014dfa59fc2a4e44955c78fd2dfe409e, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1734110594662
2024-12-13T17:23:16,727 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0ba8df0729fe4c8d9a96cd197e9f2f8c, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1734110594688
2024-12-13T17:23:16,736 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73f05a02f16845d2468f6082d7912f66#info#compaction#65 average throughput is 67.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-13T17:23:16,737 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/20610ef9147448959e4c06a33b730755 is 1080, key is row0062/info:/1734110566332/Put/seqid=0
2024-12-13T17:23:16,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741867_1043 (size=148311)
2024-12-13T17:23:16,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741867_1043 (size=148311)
2024-12-13T17:23:16,746 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/20610ef9147448959e4c06a33b730755 as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/20610ef9147448959e4c06a33b730755
2024-12-13T17:23:16,751 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73f05a02f16845d2468f6082d7912f66/info of 73f05a02f16845d2468f6082d7912f66 into 20610ef9147448959e4c06a33b730755(size=144.8 K), total size for store is 144.8 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-13T17:23:16,751 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73f05a02f16845d2468f6082d7912f66:
2024-12-13T17:23:16,751 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66., storeName=73f05a02f16845d2468f6082d7912f66/info, priority=13, startTime=1734110596725; duration=0sec
2024-12-13T17:23:16,751 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-13T17:23:16,751 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73f05a02f16845d2468f6082d7912f66:info
2024-12-13T17:23:16,797 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
	[identical stack trace omitted; see 17:23:13,795 above]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:16,830 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:23:17,010 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:17,798 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:17,831 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:18,011 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:18,800 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:18,832 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:19,012 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
2024-12-13T17:23:19,801 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:23:19,833 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:23:20,014 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
2024-12-13T17:23:20,802 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:23:20,834 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:23:21,015 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
2024-12-13T17:23:21,803 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:23:21,835 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:23:22,016 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
2024-12-13T17:23:22,804 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:23:22,836 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:23:23,017 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
2024-12-13T17:23:23,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:23:23,837 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:23:24,018 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
2024-12-13T17:23:24,806 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:23:24,838 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:23:25,019 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
2024-12-13T17:23:25,807 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:23:25,839 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:23:26,020 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
2024-12-13T17:23:26,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35103 {}] regionserver.HRegion(8581): Flush requested on 73f05a02f16845d2468f6082d7912f66
2024-12-13T17:23:26,757 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73f05a02f16845d2468f6082d7912f66 1/1 column families, dataSize=22.07 KB heapSize=23.88 KB
2024-12-13T17:23:26,764 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/b44807bc211b4dcd87cd8743a2eacdf9 is 1080, key is row0194/info:/1734110596701/Put/seqid=0
2024-12-13T17:23:26,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35103 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=73f05a02f16845d2468f6082d7912f66, server=bcc0a2319104,35103,1734110532467
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-13T17:23:26,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35103 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:37252 deadline: 1734110616770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=73f05a02f16845d2468f6082d7912f66, server=bcc0a2319104,35103,1734110532467
2024-12-13T17:23:26,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741868_1044 (size=27643)
2024-12-13T17:23:26,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741868_1044 (size=27643)
2024-12-13T17:23:26,777 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.07 KB at sequenceid=273 (bloomFilter=true), to=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/b44807bc211b4dcd87cd8743a2eacdf9
2024-12-13T17:23:26,782 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/b44807bc211b4dcd87cd8743a2eacdf9 as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/b44807bc211b4dcd87cd8743a2eacdf9
2024-12-13T17:23:26,786 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/b44807bc211b4dcd87cd8743a2eacdf9, entries=21, sequenceid=273, filesize=27.0 K
2024-12-13T17:23:26,787 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~22.07 KB/22596, heapSize ~23.86 KB/24432, currentSize=8.41 KB/8608 for 73f05a02f16845d2468f6082d7912f66 in 30ms, sequenceid=273, compaction requested=false
2024-12-13T17:23:26,787 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73f05a02f16845d2468f6082d7912f66:
2024-12-13T17:23:26,808 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:23:26,840 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:23:27,021 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
2024-12-13T17:23:27,809 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:23:27,840 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:23:28,021 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
2024-12-13T17:23:28,810 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:23:28,841 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:23:29,023 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:29,811 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:29,842 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:30,024 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:30,812 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:23:30,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:31,025 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:31,813 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:31,844 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:32,026 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:32,814 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:32,845 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:33,027 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:23:33,815 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:33,845 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:34,028 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
2024-12-13T17:23:34,048 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 73f05a02f16845d2468f6082d7912f66, had cached 0 bytes from a total of 175954
2024-12-13T17:23:34,090 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 8c3bc8ab6810cb0271b72345588a17d4, had cached 0 bytes from a total of 70862
2024-12-13T17:23:34,816 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:23:34,846 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:23:35,029 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
2024-12-13T17:23:35,817 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
2024-12-13T17:23:35,847 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
2024-12-13T17:23:36,030 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
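[Editor's note: the repeated WARN above is HBase's WAL close path (FSHLog.closeWriter -> RecoverLeaseFSUtils) polling whether the old writer's file lease has been released. The frames show the check is made reflectively (Method.invoke into DistributedFileSystem.isFileClosed), and every attempt fails with the same root cause, java.io.IOException: Filesystem closed, because the mini-cluster's DFSClient has already been shut down. Below is a minimal sketch of such a reflective probe; the class and argument names are illustrative, not the RecoverLeaseFSUtils source.]

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Hypothetical sketch of a reflective isFileClosed probe. Assumes only that
// the filesystem object may expose boolean isFileClosed(<path type>).
final class IsFileClosedProbe {
    // Returns true only when the filesystem confirms the lease is released;
    // every failure is mapped to "not closed yet", so the caller retries on a
    // timer -- which is why the WARN above recurs roughly once per second for
    // each of the three WALs in rotation.
    static boolean isFileClosed(Object fs, Object path) {
        try {
            Method m = fs.getClass().getMethod("isFileClosed", path.getClass());
            return (Boolean) m.invoke(fs, path);
        } catch (NoSuchMethodException | IllegalAccessException e) {
            return false; // method not available on this filesystem class
        } catch (InvocationTargetException e) {
            // The probed method itself threw. In the log above the wrapped
            // cause is java.io.IOException: Filesystem closed, raised by
            // DFSClient.checkOpen() on the already-closed client.
            return false;
        }
    }
}

[Because the failure is swallowed and retried, the cadence of the timestamps (17:23:27 through 17:23:36) reflects the recovery loop's fixed retry interval rather than any change in state.]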
2024-12-13T17:23:36,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35103 {}] regionserver.HRegion(8581): Flush requested on 73f05a02f16845d2468f6082d7912f66
2024-12-13T17:23:36,787 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73f05a02f16845d2468f6082d7912f66 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB
2024-12-13T17:23:36,794 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/1f1745fbd76b45b9b6f7fb3d72094f06 is 1080, key is row0215/info:/1734110606759/Put/seqid=0
2024-12-13T17:23:36,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741869_1045 (size=14681)
2024-12-13T17:23:36,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741869_1045 (size=14681)
2024-12-13T17:23:36,800 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/1f1745fbd76b45b9b6f7fb3d72094f06
2024-12-13T17:23:36,807 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/1f1745fbd76b45b9b6f7fb3d72094f06 as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/1f1745fbd76b45b9b6f7fb3d72094f06
2024-12-13T17:23:36,813 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/1f1745fbd76b45b9b6f7fb3d72094f06, entries=9, sequenceid=285, filesize=14.3 K
2024-12-13T17:23:36,814 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=1.05 KB/1076 for 73f05a02f16845d2468f6082d7912f66 in 26ms, sequenceid=285, compaction requested=true
2024-12-13T17:23:36,814 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73f05a02f16845d2468f6082d7912f66:
2024-12-13T17:23:36,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73f05a02f16845d2468f6082d7912f66:info, priority=-2147483648, current under compaction store size is 1
2024-12-13T17:23:36,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-13T17:23:36,814 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-13T17:23:36,815 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 190635 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-13T17:23:36,815 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HStore(1540): 73f05a02f16845d2468f6082d7912f66/info is initiating minor compaction (all files)
2024-12-13T17:23:36,815 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73f05a02f16845d2468f6082d7912f66/info in TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66.
2024-12-13T17:23:36,815 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/20610ef9147448959e4c06a33b730755, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/b44807bc211b4dcd87cd8743a2eacdf9, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/1f1745fbd76b45b9b6f7fb3d72094f06] into tmpdir=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp, totalSize=186.2 K
2024-12-13T17:23:36,815 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.Compactor(224): Compacting 20610ef9147448959e4c06a33b730755, keycount=132, bloomtype=ROW, size=144.8 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1734110566332
2024-12-13T17:23:36,816 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.Compactor(224): Compacting b44807bc211b4dcd87cd8743a2eacdf9, keycount=21, bloomtype=ROW, size=27.0 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1734110596701
2024-12-13T17:23:36,816 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1f1745fbd76b45b9b6f7fb3d72094f06, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1734110606759
2024-12-13T17:23:36,818 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:36,829 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73f05a02f16845d2468f6082d7912f66#info#compaction#68 average throughput is 55.41 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T17:23:36,829 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/c6e49d6e7dfb4b2faaf1bf69097a8de5 is 1080, key is row0062/info:/1734110566332/Put/seqid=0 2024-12-13T17:23:36,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741870_1046 (size=180785) 2024-12-13T17:23:36,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741870_1046 (size=180785) 2024-12-13T17:23:36,838 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/c6e49d6e7dfb4b2faaf1bf69097a8de5 as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/c6e49d6e7dfb4b2faaf1bf69097a8de5 2024-12-13T17:23:36,844 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73f05a02f16845d2468f6082d7912f66/info of 73f05a02f16845d2468f6082d7912f66 into c6e49d6e7dfb4b2faaf1bf69097a8de5(size=176.5 K), total size for store is 176.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-13T17:23:36,844 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73f05a02f16845d2468f6082d7912f66: 2024-12-13T17:23:36,844 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66., storeName=73f05a02f16845d2468f6082d7912f66/info, priority=13, startTime=1734110616814; duration=0sec 2024-12-13T17:23:36,844 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T17:23:36,844 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73f05a02f16845d2468f6082d7912f66:info 2024-12-13T17:23:36,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 2024-12-13T17:23:37,031 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 2024-12-13T17:23:37,819 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta 2024-12-13T17:23:37,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 2024-12-13T17:23:38,033 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 2024-12-13T17:23:38,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35103 {}] regionserver.HRegion(8581): Flush requested on 73f05a02f16845d2468f6082d7912f66 2024-12-13T17:23:38,805 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73f05a02f16845d2468f6082d7912f66 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-13T17:23:38,811 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/8d7d0faea2614522a84a208b289b5331 is 1080, key is row0224/info:/1734110616788/Put/seqid=0 2024-12-13T17:23:38,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741871_1047 (size=12523) 2024-12-13T17:23:38,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741871_1047 (size=12523) 2024-12-13T17:23:38,817 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/8d7d0faea2614522a84a208b289b5331 2024-12-13T17:23:38,820 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta 2024-12-13T17:23:38,823 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/8d7d0faea2614522a84a208b289b5331 as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/8d7d0faea2614522a84a208b289b5331 2024-12-13T17:23:38,828 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/8d7d0faea2614522a84a208b289b5331, entries=7, sequenceid=296, filesize=12.2 K 2024-12-13T17:23:38,829 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=18.91 KB/19368 for 73f05a02f16845d2468f6082d7912f66 in 24ms, sequenceid=296, compaction requested=false 2024-12-13T17:23:38,829 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73f05a02f16845d2468f6082d7912f66: 2024-12-13T17:23:38,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35103 {}] regionserver.HRegion(8581): Flush requested on 73f05a02f16845d2468f6082d7912f66 2024-12-13T17:23:38,830 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 73f05a02f16845d2468f6082d7912f66 1/1 column families, dataSize=19.96 KB heapSize=21.63 KB 2024-12-13T17:23:38,833 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/27a116cda6c2401db682a0379f7c4d57 is 1080, key is row0231/info:/1734110618806/Put/seqid=0 2024-12-13T17:23:38,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741872_1048 (size=25491) 2024-12-13T17:23:38,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741872_1048 (size=25491) 2024-12-13T17:23:38,838 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=19.96 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/27a116cda6c2401db682a0379f7c4d57 2024-12-13T17:23:38,843 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/27a116cda6c2401db682a0379f7c4d57 as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/27a116cda6c2401db682a0379f7c4d57 2024-12-13T17:23:38,847 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/27a116cda6c2401db682a0379f7c4d57, entries=19, sequenceid=318, filesize=24.9 K 2024-12-13T17:23:38,848 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~19.96 KB/20444, heapSize ~21.61 KB/22128, currentSize=7.36 KB/7532 for 73f05a02f16845d2468f6082d7912f66 in 19ms, sequenceid=318, compaction requested=true 2024-12-13T17:23:38,848 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 73f05a02f16845d2468f6082d7912f66: 2024-12-13T17:23:38,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73f05a02f16845d2468f6082d7912f66:info, priority=-2147483648, current under compaction store size is 1 2024-12-13T17:23:38,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T17:23:38,848 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-13T17:23:38,849 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 218799 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-13T17:23:38,849 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 2024-12-13T17:23:38,850 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HStore(1540): 73f05a02f16845d2468f6082d7912f66/info is initiating minor compaction (all files) 2024-12-13T17:23:38,850 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 73f05a02f16845d2468f6082d7912f66/info in TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66.
2024-12-13T17:23:38,850 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/c6e49d6e7dfb4b2faaf1bf69097a8de5, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/8d7d0faea2614522a84a208b289b5331, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/27a116cda6c2401db682a0379f7c4d57] into tmpdir=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp, totalSize=213.7 K 2024-12-13T17:23:38,850 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.Compactor(224): Compacting c6e49d6e7dfb4b2faaf1bf69097a8de5, keycount=162, bloomtype=ROW, size=176.5 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1734110566332 2024-12-13T17:23:38,850 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8d7d0faea2614522a84a208b289b5331, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1734110616788 2024-12-13T17:23:38,851 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] compactions.Compactor(224): Compacting 27a116cda6c2401db682a0379f7c4d57, keycount=19, bloomtype=ROW, size=24.9 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1734110618806 2024-12-13T17:23:38,861 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73f05a02f16845d2468f6082d7912f66#info#compaction#71 average throughput is 64.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-13T17:23:38,861 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/5371d80638c5419188470d5bf1986033 is 1080, key is row0062/info:/1734110566332/Put/seqid=0 2024-12-13T17:23:38,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741873_1049 (size=209018) 2024-12-13T17:23:38,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741873_1049 (size=209018) 2024-12-13T17:23:38,869 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/5371d80638c5419188470d5bf1986033 as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/5371d80638c5419188470d5bf1986033 2024-12-13T17:23:38,874 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 73f05a02f16845d2468f6082d7912f66/info of 73f05a02f16845d2468f6082d7912f66 into 5371d80638c5419188470d5bf1986033(size=204.1 K), total size for store is 204.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-13T17:23:38,874 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 73f05a02f16845d2468f6082d7912f66: 2024-12-13T17:23:38,874 INFO [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66., storeName=73f05a02f16845d2468f6082d7912f66/info, priority=13, startTime=1734110618848; duration=0sec 2024-12-13T17:23:38,874 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-13T17:23:38,874 DEBUG [RS:0;bcc0a2319104:35103-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73f05a02f16845d2468f6082d7912f66:info 2024-12-13T17:23:39,034 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 2024-12-13T17:23:39,599 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 4ae54d53dadb6f7d5f47a16f2daa01e3, had cached 0 bytes from a total of 23930 2024-12-13T17:23:39,821 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta 2024-12-13T17:23:39,850 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 2024-12-13T17:23:40,034 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 2024-12-13T17:23:40,822 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta 2024-12-13T17:23:40,837 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-12-13T17:23:40,837 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C35103%2C1734110532467.1734110620837 2024-12-13T17:23:40,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 2024-12-13T17:23:40,852 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/WALs/bcc0a2319104,35103,1734110532467/bcc0a2319104%2C35103%2C1734110532467.1734110532975 with entries=311, filesize=307.59 KB; new WAL /user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/WALs/bcc0a2319104,35103,1734110532467/bcc0a2319104%2C35103%2C1734110532467.1734110620837 2024-12-13T17:23:40,853 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38517:38517),(127.0.0.1/127.0.0.1:37337:37337)] 2024-12-13T17:23:40,853 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/WALs/bcc0a2319104,35103,1734110532467/bcc0a2319104%2C35103%2C1734110532467.1734110532975 is not closed yet, will try archiving it next time 2024-12-13T17:23:40,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741833_1009 (size=314980) 2024-12-13T17:23:40,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741833_1009 (size=314980) 2024-12-13T17:23:40,858 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 8c3bc8ab6810cb0271b72345588a17d4: 2024-12-13T17:23:40,858 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.21 KB heapSize=4.13 KB 2024-12-13T17:23:40,862 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/hbase/meta/1588230740/.tmp/info/3b69ba257b694e88bd77176020c3b149 is 193, key is TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66./info:regioninfo/1734110569090/Put/seqid=0 2024-12-13T17:23:40,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741875_1051 (size=7803) 2024-12-13T17:23:40,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741875_1051 (size=7803) 2024-12-13T17:23:40,866 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.21 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/hbase/meta/1588230740/.tmp/info/3b69ba257b694e88bd77176020c3b149 2024-12-13T17:23:40,872 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/hbase/meta/1588230740/.tmp/info/3b69ba257b694e88bd77176020c3b149 as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/hbase/meta/1588230740/info/3b69ba257b694e88bd77176020c3b149 2024-12-13T17:23:40,878 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/hbase/meta/1588230740/info/3b69ba257b694e88bd77176020c3b149, entries=16, sequenceid=24, filesize=7.6 K 2024-12-13T17:23:40,878 INFO [Time-limited test {}] regionserver.HRegion(3040): Finished flush of dataSize ~2.21 KB/2260, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 20ms, sequenceid=24,
compaction requested=false 2024-12-13T17:23:40,878 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 1588230740: 2024-12-13T17:23:40,879 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 704a36f7d1b3ea16a3462969bad1e4d2 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-13T17:23:40,892 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/hbase/namespace/704a36f7d1b3ea16a3462969bad1e4d2/.tmp/info/23e79ec20561414bb5227e4b0361b237 is 45, key is default/info:d/1734110533916/Put/seqid=0 2024-12-13T17:23:40,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741876_1052 (size=5037) 2024-12-13T17:23:40,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741876_1052 (size=5037) 2024-12-13T17:23:40,896 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/hbase/namespace/704a36f7d1b3ea16a3462969bad1e4d2/.tmp/info/23e79ec20561414bb5227e4b0361b237 2024-12-13T17:23:40,900 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/hbase/namespace/704a36f7d1b3ea16a3462969bad1e4d2/.tmp/info/23e79ec20561414bb5227e4b0361b237 as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/hbase/namespace/704a36f7d1b3ea16a3462969bad1e4d2/info/23e79ec20561414bb5227e4b0361b237 2024-12-13T17:23:40,905 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/hbase/namespace/704a36f7d1b3ea16a3462969bad1e4d2/info/23e79ec20561414bb5227e4b0361b237, entries=2, sequenceid=6, filesize=4.9 K 2024-12-13T17:23:40,905 INFO [Time-limited test {}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 704a36f7d1b3ea16a3462969bad1e4d2 in 26ms, sequenceid=6, compaction requested=false 2024-12-13T17:23:40,905 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 704a36f7d1b3ea16a3462969bad1e4d2: 2024-12-13T17:23:40,906 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 73f05a02f16845d2468f6082d7912f66 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-13T17:23:40,909 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/b2ef69b0404e417cb6ef4ec395772803 is 1080, key is row0250/info:/1734110618830/Put/seqid=0 2024-12-13T17:23:40,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741877_1053 (size=12523) 2024-12-13T17:23:40,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741877_1053 (size=12523) 2024-12-13T17:23:40,914 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=329 
(bloomFilter=true), to=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/b2ef69b0404e417cb6ef4ec395772803 2024-12-13T17:23:40,919 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/.tmp/info/b2ef69b0404e417cb6ef4ec395772803 as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/b2ef69b0404e417cb6ef4ec395772803 2024-12-13T17:23:40,924 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/b2ef69b0404e417cb6ef4ec395772803, entries=7, sequenceid=329, filesize=12.2 K 2024-12-13T17:23:40,925 INFO [Time-limited test {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 73f05a02f16845d2468f6082d7912f66 in 20ms, sequenceid=329, compaction requested=false 2024-12-13T17:23:40,925 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 73f05a02f16845d2468f6082d7912f66: 2024-12-13T17:23:40,926 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C35103%2C1734110532467.1734110620925 2024-12-13T17:23:40,933 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/WALs/bcc0a2319104,35103,1734110532467/bcc0a2319104%2C35103%2C1734110532467.1734110620837 with entries=4, filesize=1.22 KB; new WAL /user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/WALs/bcc0a2319104,35103,1734110532467/bcc0a2319104%2C35103%2C1734110532467.1734110620925 2024-12-13T17:23:40,933 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37337:37337),(127.0.0.1/127.0.0.1:38517:38517)] 2024-12-13T17:23:40,933 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/WALs/bcc0a2319104,35103,1734110532467/bcc0a2319104%2C35103%2C1734110532467.1734110620837 is not closed yet, will try archiving it next time 2024-12-13T17:23:40,934 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/WALs/bcc0a2319104,35103,1734110532467/bcc0a2319104%2C35103%2C1734110532467.1734110532975 to hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/oldWALs/bcc0a2319104%2C35103%2C1734110532467.1734110532975 2024-12-13T17:23:40,934 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-12-13T17:23:40,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741874_1050 (size=1255) 2024-12-13T17:23:40,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741874_1050 (size=1255) 2024-12-13T17:23:40,936 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving 
hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/WALs/bcc0a2319104,35103,1734110532467/bcc0a2319104%2C35103%2C1734110532467.1734110620837 to hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/oldWALs/bcc0a2319104%2C35103%2C1734110532467.1734110620837 2024-12-13T17:23:41,034 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-13T17:23:41,035 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-13T17:23:41,035 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3ed470fc to 127.0.0.1:51612 2024-12-13T17:23:41,035 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T17:23:41,035 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-13T17:23:41,035 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1408733756, stopped=false 2024-12-13T17:23:41,035 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=bcc0a2319104,35795,1734110532328 2024-12-13T17:23:41,035 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:23:41,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35103-0x1002069ef590001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-13T17:23:41,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-13T17:23:41,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35103-0x1002069ef590001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:23:41,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:23:41,082 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-13T17:23:41,082 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T17:23:41,083 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'bcc0a2319104,35103,1734110532467' ***** 2024-12-13T17:23:41,083 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-13T17:23:41,084 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T17:23:41,084 INFO [RS:0;bcc0a2319104:35103 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-13T17:23:41,084 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-13T17:23:41,084 INFO [RS:0;bcc0a2319104:35103 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-13T17:23:41,084 INFO [RS:0;bcc0a2319104:35103 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-13T17:23:41,084 INFO [RS:0;bcc0a2319104:35103 {}] regionserver.HRegionServer(3579): Received CLOSE for 8c3bc8ab6810cb0271b72345588a17d4 2024-12-13T17:23:41,084 INFO [RS:0;bcc0a2319104:35103 {}] regionserver.HRegionServer(3579): Received CLOSE for 704a36f7d1b3ea16a3462969bad1e4d2 2024-12-13T17:23:41,085 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35103-0x1002069ef590001, quorum=127.0.0.1:51612, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T17:23:41,085 INFO [RS:0;bcc0a2319104:35103 {}] regionserver.HRegionServer(3579): Received CLOSE for 73f05a02f16845d2468f6082d7912f66 2024-12-13T17:23:41,085 INFO [RS:0;bcc0a2319104:35103 {}] regionserver.HRegionServer(1224): stopping server bcc0a2319104,35103,1734110532467 2024-12-13T17:23:41,085 DEBUG [RS:0;bcc0a2319104:35103 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T17:23:41,085 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 8c3bc8ab6810cb0271b72345588a17d4, disabling compactions & flushes 2024-12-13T17:23:41,085 INFO [RS:0;bcc0a2319104:35103 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
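The WARN above comes from WAL lease recovery: RecoverLeaseFSUtils asks the NameNode to revoke the previous writer's lease, then polls (via reflection, for Hadoop compatibility, hence the InvocationTargetException wrapper) whether the file has been closed. Here the DFSClient behind the FileSystem had already been shut down, so the reflective isFileClosed call surfaced "Filesystem closed". A minimal sketch of that loop, assuming an HDFS-backed FileSystem and omitting the timeout and backoff handling the real utility adds:

```java
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Minimal sketch of WAL lease recovery, assuming fs is backed by HDFS.
// recoverLease() asks the NameNode to revoke the old writer's lease;
// isFileClosed() is polled until the last block is finalized.
static void recoverWalLease(FileSystem fs, Path wal) throws Exception {
  if (!(fs instanceof DistributedFileSystem)) {
    return; // lease recovery only applies to HDFS
  }
  DistributedFileSystem dfs = (DistributedFileSystem) fs;
  boolean recovered = dfs.recoverLease(wal);
  while (!recovered && !dfs.isFileClosed(wal)) {
    Thread.sleep(1000); // the real utility backs off and eventually gives up
    recovered = dfs.recoverLease(wal);
  }
}
```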
2024-12-13T17:23:41,085 INFO [RS:0;bcc0a2319104:35103 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-13T17:23:41,085 INFO [RS:0;bcc0a2319104:35103 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-13T17:23:41,085 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRolling,,1734110568418.8c3bc8ab6810cb0271b72345588a17d4. 2024-12-13T17:23:41,085 INFO [RS:0;bcc0a2319104:35103 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-13T17:23:41,085 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRolling,,1734110568418.8c3bc8ab6810cb0271b72345588a17d4. 2024-12-13T17:23:41,085 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRolling,,1734110568418.8c3bc8ab6810cb0271b72345588a17d4. after waiting 0 ms 2024-12-13T17:23:41,085 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRolling,,1734110568418.8c3bc8ab6810cb0271b72345588a17d4. 2024-12-13T17:23:41,085 INFO [RS:0;bcc0a2319104:35103 {}] regionserver.HRegionServer(1599): Waiting on 4 regions to close 2024-12-13T17:23:41,085 DEBUG [RS:0;bcc0a2319104:35103 {}] regionserver.HRegionServer(1603): Online Regions={8c3bc8ab6810cb0271b72345588a17d4=TestLogRolling-testLogRolling,,1734110568418.8c3bc8ab6810cb0271b72345588a17d4., 1588230740=hbase:meta,,1.1588230740, 704a36f7d1b3ea16a3462969bad1e4d2=hbase:namespace,,1734110533475.704a36f7d1b3ea16a3462969bad1e4d2., 73f05a02f16845d2468f6082d7912f66=TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66.} 2024-12-13T17:23:41,085 DEBUG [RS:0;bcc0a2319104:35103 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 704a36f7d1b3ea16a3462969bad1e4d2, 73f05a02f16845d2468f6082d7912f66, 8c3bc8ab6810cb0271b72345588a17d4 2024-12-13T17:23:41,085 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-13T17:23:41,086 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-13T17:23:41,086 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-13T17:23:41,086 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-13T17:23:41,086 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-13T17:23:41,086 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1734110568418.8c3bc8ab6810cb0271b72345588a17d4.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/8c3bc8ab6810cb0271b72345588a17d4/info/e0a05809d54c4c938a8f1d1eef386c1f.db80f3d14c7da4090fc6ab00d964b5ae->hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/e0a05809d54c4c938a8f1d1eef386c1f-bottom] to archive 2024-12-13T17:23:41,087 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1734110568418.8c3bc8ab6810cb0271b72345588a17d4.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-13T17:23:41,090 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/8c3bc8ab6810cb0271b72345588a17d4/info/e0a05809d54c4c938a8f1d1eef386c1f.db80f3d14c7da4090fc6ab00d964b5ae to hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/archive/data/default/TestLogRolling-testLogRolling/8c3bc8ab6810cb0271b72345588a17d4/info/e0a05809d54c4c938a8f1d1eef386c1f.db80f3d14c7da4090fc6ab00d964b5ae 2024-12-13T17:23:41,091 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/hbase/meta/1588230740/recovered.edits/27.seqid, newMaxSeqId=27, maxSeqId=1 2024-12-13T17:23:41,092 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-13T17:23:41,092 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-13T17:23:41,092 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-13T17:23:41,092 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-13T17:23:41,094 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/8c3bc8ab6810cb0271b72345588a17d4/recovered.edits/128.seqid, newMaxSeqId=128, maxSeqId=123 2024-12-13T17:23:41,094 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRolling,,1734110568418.8c3bc8ab6810cb0271b72345588a17d4. 2024-12-13T17:23:41,094 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 8c3bc8ab6810cb0271b72345588a17d4: 2024-12-13T17:23:41,094 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1734110568418.8c3bc8ab6810cb0271b72345588a17d4. 
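The 27.seqid and 128.seqid files written above are close markers: when a region closes, it records its highest flushed sequence id as a file name under recovered.edits, so a later open can skip WAL edits that are already persisted. A hypothetical parser for that naming scheme (the helper is illustrative, not HBase's own API):

```java
// Hypothetical helper: extract the max sequence id from a "<N>.seqid"
// marker file name, e.g. "27.seqid" -> 27, matching newMaxSeqId above.
static long parseMaxSeqId(String markerFileName) {
  if (!markerFileName.endsWith(".seqid")) {
    throw new IllegalArgumentException("not a seqid marker: " + markerFileName);
  }
  return Long.parseLong(
      markerFileName.substring(0, markerFileName.length() - ".seqid".length()));
}
```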
2024-12-13T17:23:41,094 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 704a36f7d1b3ea16a3462969bad1e4d2, disabling compactions & flushes 2024-12-13T17:23:41,094 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734110533475.704a36f7d1b3ea16a3462969bad1e4d2. 2024-12-13T17:23:41,094 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734110533475.704a36f7d1b3ea16a3462969bad1e4d2. 2024-12-13T17:23:41,094 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734110533475.704a36f7d1b3ea16a3462969bad1e4d2. after waiting 0 ms 2024-12-13T17:23:41,094 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734110533475.704a36f7d1b3ea16a3462969bad1e4d2. 2024-12-13T17:23:41,098 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/hbase/namespace/704a36f7d1b3ea16a3462969bad1e4d2/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-13T17:23:41,098 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1734110533475.704a36f7d1b3ea16a3462969bad1e4d2. 2024-12-13T17:23:41,098 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 704a36f7d1b3ea16a3462969bad1e4d2: 2024-12-13T17:23:41,098 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1734110533475.704a36f7d1b3ea16a3462969bad1e4d2. 2024-12-13T17:23:41,098 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 73f05a02f16845d2468f6082d7912f66, disabling compactions & flushes 2024-12-13T17:23:41,098 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66. 2024-12-13T17:23:41,098 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66. 2024-12-13T17:23:41,098 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66. after waiting 0 ms 2024-12-13T17:23:41,098 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66. 
2024-12-13T17:23:41,099 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/e0a05809d54c4c938a8f1d1eef386c1f.db80f3d14c7da4090fc6ab00d964b5ae->hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/db80f3d14c7da4090fc6ab00d964b5ae/info/e0a05809d54c4c938a8f1d1eef386c1f-top, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/TestLogRolling-testLogRolling=db80f3d14c7da4090fc6ab00d964b5ae-cdd6e29d515942cba2369ccb9c50fe2f, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/493295dd7e654643a97bce0254c1944b, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/TestLogRolling-testLogRolling=db80f3d14c7da4090fc6ab00d964b5ae-b6fd3a5dd5e24b1cb16573153f20187b, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/b818a57e2d284544965240ad8a1ea871, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/680ca5059e4948d797d211ed7e7ee45e, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/607d6451eff84b8cb2ba87a9e9d817fa, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/ab844e02baca4c82bf8c6108daa8a9fd, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/6dd2b472853e45d58e2dcbfe280e7b59, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/f98dc69e24a54a3abc7060f4c078d8b1, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/0d5cabfdc93041889ce27466e6c3c44f, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/c4d33e682cb340ca87d207bd6591daf1, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/b4af8ac39e0146508c658818be73b4ba, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/014dfa59fc2a4e44955c78fd2dfe409e, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/20610ef9147448959e4c06a33b730755, 
hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/0ba8df0729fe4c8d9a96cd197e9f2f8c, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/b44807bc211b4dcd87cd8743a2eacdf9, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/c6e49d6e7dfb4b2faaf1bf69097a8de5, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/1f1745fbd76b45b9b6f7fb3d72094f06, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/8d7d0faea2614522a84a208b289b5331, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/27a116cda6c2401db682a0379f7c4d57] to archive 2024-12-13T17:23:41,100 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-13T17:23:41,102 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/e0a05809d54c4c938a8f1d1eef386c1f.db80f3d14c7da4090fc6ab00d964b5ae to hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/archive/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/e0a05809d54c4c938a8f1d1eef386c1f.db80f3d14c7da4090fc6ab00d964b5ae 2024-12-13T17:23:41,102 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/TestLogRolling-testLogRolling=db80f3d14c7da4090fc6ab00d964b5ae-b6fd3a5dd5e24b1cb16573153f20187b to hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/archive/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/TestLogRolling-testLogRolling=db80f3d14c7da4090fc6ab00d964b5ae-b6fd3a5dd5e24b1cb16573153f20187b 2024-12-13T17:23:41,103 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/493295dd7e654643a97bce0254c1944b to hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/archive/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/493295dd7e654643a97bce0254c1944b 2024-12-13T17:23:41,103 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/TestLogRolling-testLogRolling=db80f3d14c7da4090fc6ab00d964b5ae-cdd6e29d515942cba2369ccb9c50fe2f to 
hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/archive/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/TestLogRolling-testLogRolling=db80f3d14c7da4090fc6ab00d964b5ae-cdd6e29d515942cba2369ccb9c50fe2f 2024-12-13T17:23:41,103 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/b818a57e2d284544965240ad8a1ea871 to hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/archive/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/b818a57e2d284544965240ad8a1ea871 2024-12-13T17:23:41,103 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/680ca5059e4948d797d211ed7e7ee45e to hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/archive/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/680ca5059e4948d797d211ed7e7ee45e 2024-12-13T17:23:41,103 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/ab844e02baca4c82bf8c6108daa8a9fd to hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/archive/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/ab844e02baca4c82bf8c6108daa8a9fd 2024-12-13T17:23:41,103 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/607d6451eff84b8cb2ba87a9e9d817fa to hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/archive/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/607d6451eff84b8cb2ba87a9e9d817fa 2024-12-13T17:23:41,104 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/f98dc69e24a54a3abc7060f4c078d8b1 to hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/archive/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/f98dc69e24a54a3abc7060f4c078d8b1 2024-12-13T17:23:41,104 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/6dd2b472853e45d58e2dcbfe280e7b59 to hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/archive/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/6dd2b472853e45d58e2dcbfe280e7b59 2024-12-13T17:23:41,104 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/0d5cabfdc93041889ce27466e6c3c44f to hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/archive/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/0d5cabfdc93041889ce27466e6c3c44f 2024-12-13T17:23:41,105 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/20610ef9147448959e4c06a33b730755 to hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/archive/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/20610ef9147448959e4c06a33b730755 2024-12-13T17:23:41,105 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/c4d33e682cb340ca87d207bd6591daf1 to hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/archive/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/c4d33e682cb340ca87d207bd6591daf1 2024-12-13T17:23:41,105 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/014dfa59fc2a4e44955c78fd2dfe409e to hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/archive/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/014dfa59fc2a4e44955c78fd2dfe409e 2024-12-13T17:23:41,105 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/b4af8ac39e0146508c658818be73b4ba to hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/archive/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/b4af8ac39e0146508c658818be73b4ba 2024-12-13T17:23:41,106 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/0ba8df0729fe4c8d9a96cd197e9f2f8c to hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/archive/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/0ba8df0729fe4c8d9a96cd197e9f2f8c 2024-12-13T17:23:41,106 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/b44807bc211b4dcd87cd8743a2eacdf9 to hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/archive/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/b44807bc211b4dcd87cd8743a2eacdf9 2024-12-13T17:23:41,106 DEBUG [HFileArchiver-14 
{}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/c6e49d6e7dfb4b2faaf1bf69097a8de5 to hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/archive/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/c6e49d6e7dfb4b2faaf1bf69097a8de5 2024-12-13T17:23:41,106 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/1f1745fbd76b45b9b6f7fb3d72094f06 to hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/archive/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/1f1745fbd76b45b9b6f7fb3d72094f06 2024-12-13T17:23:41,106 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/27a116cda6c2401db682a0379f7c4d57 to hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/archive/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/27a116cda6c2401db682a0379f7c4d57 2024-12-13T17:23:41,106 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/8d7d0faea2614522a84a208b289b5331 to hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/archive/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/info/8d7d0faea2614522a84a208b289b5331 2024-12-13T17:23:41,110 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/data/default/TestLogRolling-testLogRolling/73f05a02f16845d2468f6082d7912f66/recovered.edits/332.seqid, newMaxSeqId=332, maxSeqId=123 2024-12-13T17:23:41,111 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66. 2024-12-13T17:23:41,111 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 73f05a02f16845d2468f6082d7912f66: 2024-12-13T17:23:41,111 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1734110568418.73f05a02f16845d2468f6082d7912f66. 2024-12-13T17:23:41,286 INFO [RS:0;bcc0a2319104:35103 {}] regionserver.HRegionServer(1250): stopping server bcc0a2319104,35103,1734110532467; all regions closed. 
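Every HFileArchiver entry above performs the same path rewrite: a store file under <root>/data/... is moved to the mirrored location under <root>/archive/data/.... A hypothetical helper reproducing the mapping visible in these log lines (not HFileArchiver's actual API):

```java
import org.apache.hadoop.fs.Path;

// Hypothetical mapping of a store file to its archive location:
// <root>/data/<ns>/<table>/<region>/<cf>/<file>
//   -> <root>/archive/data/<ns>/<table>/<region>/<cf>/<file>
static Path toArchivePath(Path rootDir, Path storeFile) {
  String root = rootDir.toUri().getPath();
  String file = storeFile.toUri().getPath();
  if (!file.startsWith(root + "/")) {
    throw new IllegalArgumentException(storeFile + " is not under " + rootDir);
  }
  // Keep the path relative to the root and re-anchor it under archive/.
  return new Path(new Path(rootDir, "archive"), file.substring(root.length() + 1));
}
```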
2024-12-13T17:23:41,287 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/WALs/bcc0a2319104,35103,1734110532467 2024-12-13T17:23:41,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741834_1010 (size=9351) 2024-12-13T17:23:41,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741834_1010 (size=9351) 2024-12-13T17:23:41,296 DEBUG [RS:0;bcc0a2319104:35103 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/oldWALs 2024-12-13T17:23:41,296 INFO [RS:0;bcc0a2319104:35103 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog bcc0a2319104%2C35103%2C1734110532467.meta:.meta(num 1734110533359) 2024-12-13T17:23:41,296 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/WALs/bcc0a2319104,35103,1734110532467 2024-12-13T17:23:41,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741878_1054 (size=1072) 2024-12-13T17:23:41,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741878_1054 (size=1072) 2024-12-13T17:23:41,300 DEBUG [RS:0;bcc0a2319104:35103 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/oldWALs 2024-12-13T17:23:41,300 INFO [RS:0;bcc0a2319104:35103 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog bcc0a2319104%2C35103%2C1734110532467:(num 1734110620925) 2024-12-13T17:23:41,300 DEBUG [RS:0;bcc0a2319104:35103 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T17:23:41,300 INFO [RS:0;bcc0a2319104:35103 {}] regionserver.LeaseManager(133): Closed leases 2024-12-13T17:23:41,301 INFO [RS:0;bcc0a2319104:35103 {}] hbase.ChoreService(370): Chore service for: regionserver/bcc0a2319104:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-13T17:23:41,301 INFO [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
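The roll-and-archive cycle that dominates this test (a new WAL is created, the previous one is closed and, once fully flushed, moved to oldWALs, as in the entries above) can also be driven from a client. A sketch using the public Admin API, assuming a reachable cluster configuration; rollWALWriter and getRegionServers are the branch-2 method names:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndRollExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Flush the table's memstores, as the per-region flushes above do.
      admin.flush(TableName.valueOf("TestLogRolling-testLogRolling"));
      // Ask each region server to roll its WAL; a fully flushed old WAL
      // carries no needed edits and becomes eligible for oldWALs.
      for (ServerName sn : admin.getRegionServers()) {
        admin.rollWALWriter(sn);
      }
    }
  }
}
```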
2024-12-13T17:23:41,301 INFO [RS:0;bcc0a2319104:35103 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:35103 2024-12-13T17:23:41,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-13T17:23:41,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35103-0x1002069ef590001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/bcc0a2319104,35103,1734110532467 2024-12-13T17:23:41,314 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [bcc0a2319104,35103,1734110532467] 2024-12-13T17:23:41,314 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing bcc0a2319104,35103,1734110532467; numProcessing=1 2024-12-13T17:23:41,323 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/bcc0a2319104,35103,1734110532467 already deleted, retry=false 2024-12-13T17:23:41,323 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; bcc0a2319104,35103,1734110532467 expired; onlineServers=0 2024-12-13T17:23:41,323 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'bcc0a2319104,35795,1734110532328' ***** 2024-12-13T17:23:41,323 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-13T17:23:41,323 DEBUG [M:0;bcc0a2319104:35795 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4179dbcb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bcc0a2319104/172.17.0.2:0 2024-12-13T17:23:41,323 INFO [M:0;bcc0a2319104:35795 {}] regionserver.HRegionServer(1224): stopping server bcc0a2319104,35795,1734110532328 2024-12-13T17:23:41,323 INFO [M:0;bcc0a2319104:35795 {}] regionserver.HRegionServer(1250): stopping server bcc0a2319104,35795,1734110532328; all regions closed. 2024-12-13T17:23:41,323 DEBUG [M:0;bcc0a2319104:35795 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-13T17:23:41,323 DEBUG [M:0;bcc0a2319104:35795 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-13T17:23:41,323 DEBUG [M:0;bcc0a2319104:35795 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-13T17:23:41,323 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
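The expiration processing above rests on ZooKeeper ephemeral nodes: each region server holds an ephemeral znode under /hbase/rs, and when its session ends the node is deleted, firing the NodeDeleted watch that the master's RegionServerTracker acts on. A minimal sketch of that mechanism with the plain ZooKeeper client (quorum address taken from the log; the znode name is illustrative and its parent must already exist):

```java
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralLivenessSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:51612", 30000,
        event -> System.out.println("Received ZooKeeper Event: " + event));
    // Register like a region server would: an ephemeral node that lives
    // only as long as this session does.
    zk.create("/hbase/rs/example-server", new byte[0],
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    // Closing the session removes the node; watchers on /hbase/rs see
    // NodeChildrenChanged / NodeDeleted, as in the entries above.
    zk.close();
  }
}
```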
2024-12-13T17:23:41,323 DEBUG [master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.small.0-1734110532748 {}] cleaner.HFileCleaner(306): Exit Thread[master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.small.0-1734110532748,5,FailOnTimeoutGroup] 2024-12-13T17:23:41,323 DEBUG [master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.large.0-1734110532748 {}] cleaner.HFileCleaner(306): Exit Thread[master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.large.0-1734110532748,5,FailOnTimeoutGroup] 2024-12-13T17:23:41,324 INFO [M:0;bcc0a2319104:35795 {}] hbase.ChoreService(370): Chore service for: master/bcc0a2319104:0 had [] on shutdown 2024-12-13T17:23:41,324 DEBUG [M:0;bcc0a2319104:35795 {}] master.HMaster(1733): Stopping service threads 2024-12-13T17:23:41,324 INFO [M:0;bcc0a2319104:35795 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-13T17:23:41,324 INFO [M:0;bcc0a2319104:35795 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-13T17:23:41,324 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-13T17:23:41,331 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-13T17:23:41,331 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-13T17:23:41,331 DEBUG [M:0;bcc0a2319104:35795 {}] zookeeper.ZKUtil(347): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-13T17:23:41,331 WARN [M:0;bcc0a2319104:35795 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-13T17:23:41,331 INFO [M:0;bcc0a2319104:35795 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-13T17:23:41,331 INFO [M:0;bcc0a2319104:35795 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-13T17:23:41,332 DEBUG [M:0;bcc0a2319104:35795 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-13T17:23:41,332 INFO [M:0;bcc0a2319104:35795 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-13T17:23:41,332 DEBUG [M:0;bcc0a2319104:35795 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-13T17:23:41,332 DEBUG [M:0;bcc0a2319104:35795 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-13T17:23:41,332 DEBUG [M:0;bcc0a2319104:35795 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-13T17:23:41,332 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-13T17:23:41,332 INFO [M:0;bcc0a2319104:35795 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=66.45 KB heapSize=81.70 KB 2024-12-13T17:23:41,349 DEBUG [M:0;bcc0a2319104:35795 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6aa7c5b889c54a20a3d29aaa9c537806 is 82, key is hbase:meta,,1/info:regioninfo/1734110533377/Put/seqid=0 2024-12-13T17:23:41,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741879_1055 (size=5672) 2024-12-13T17:23:41,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741879_1055 (size=5672) 2024-12-13T17:23:41,353 INFO [M:0;bcc0a2319104:35795 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6aa7c5b889c54a20a3d29aaa9c537806 2024-12-13T17:23:41,372 DEBUG [M:0;bcc0a2319104:35795 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/41f144198e744bcdbf65819c161ce7d4 is 751, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1734110534535/Put/seqid=0 2024-12-13T17:23:41,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741880_1056 (size=7278) 2024-12-13T17:23:41,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741880_1056 (size=7278) 2024-12-13T17:23:41,377 INFO [M:0;bcc0a2319104:35795 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65.85 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/41f144198e744bcdbf65819c161ce7d4 2024-12-13T17:23:41,380 INFO [M:0;bcc0a2319104:35795 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 41f144198e744bcdbf65819c161ce7d4 2024-12-13T17:23:41,395 DEBUG [M:0;bcc0a2319104:35795 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c55affa62ba24fa280da7d2271d47633 is 69, key is bcc0a2319104,35103,1734110532467/rs:state/1734110532820/Put/seqid=0 2024-12-13T17:23:41,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741881_1057 (size=5156) 2024-12-13T17:23:41,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741881_1057 (size=5156) 2024-12-13T17:23:41,400 INFO [M:0;bcc0a2319104:35795 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c55affa62ba24fa280da7d2271d47633 2024-12-13T17:23:41,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35103-0x1002069ef590001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-13T17:23:41,415 INFO [RS:0;bcc0a2319104:35103 {}] regionserver.HRegionServer(1307): Exiting; stopping=bcc0a2319104,35103,1734110532467; zookeeper connection closed. 2024-12-13T17:23:41,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35103-0x1002069ef590001, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-13T17:23:41,415 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6d1f32f6 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6d1f32f6 2024-12-13T17:23:41,415 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-13T17:23:41,416 DEBUG [M:0;bcc0a2319104:35795 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c730ee9e30c941e385d2ec70ddab710d is 52, key is load_balancer_on/state:d/1734110534106/Put/seqid=0 2024-12-13T17:23:41,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741882_1058 (size=5056) 2024-12-13T17:23:41,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741882_1058 (size=5056) 2024-12-13T17:23:41,421 INFO [M:0;bcc0a2319104:35795 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c730ee9e30c941e385d2ec70ddab710d 2024-12-13T17:23:41,425 DEBUG [M:0;bcc0a2319104:35795 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6aa7c5b889c54a20a3d29aaa9c537806 as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6aa7c5b889c54a20a3d29aaa9c537806 2024-12-13T17:23:41,429 INFO [M:0;bcc0a2319104:35795 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6aa7c5b889c54a20a3d29aaa9c537806, entries=8, sequenceid=164, filesize=5.5 K 2024-12-13T17:23:41,430 DEBUG [M:0;bcc0a2319104:35795 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/41f144198e744bcdbf65819c161ce7d4 as 
hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/41f144198e744bcdbf65819c161ce7d4 2024-12-13T17:23:41,434 INFO [M:0;bcc0a2319104:35795 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 41f144198e744bcdbf65819c161ce7d4 2024-12-13T17:23:41,434 INFO [M:0;bcc0a2319104:35795 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/41f144198e744bcdbf65819c161ce7d4, entries=18, sequenceid=164, filesize=7.1 K 2024-12-13T17:23:41,435 DEBUG [M:0;bcc0a2319104:35795 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c55affa62ba24fa280da7d2271d47633 as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c55affa62ba24fa280da7d2271d47633 2024-12-13T17:23:41,439 INFO [M:0;bcc0a2319104:35795 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c55affa62ba24fa280da7d2271d47633, entries=1, sequenceid=164, filesize=5.0 K 2024-12-13T17:23:41,440 DEBUG [M:0;bcc0a2319104:35795 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c730ee9e30c941e385d2ec70ddab710d as hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c730ee9e30c941e385d2ec70ddab710d 2024-12-13T17:23:41,443 INFO [M:0;bcc0a2319104:35795 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41263/user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c730ee9e30c941e385d2ec70ddab710d, entries=1, sequenceid=164, filesize=4.9 K 2024-12-13T17:23:41,444 INFO [M:0;bcc0a2319104:35795 {}] regionserver.HRegion(3040): Finished flush of dataSize ~66.45 KB/68043, heapSize ~81.63 KB/83592, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 112ms, sequenceid=164, compaction requested=false 2024-12-13T17:23:41,445 INFO [M:0;bcc0a2319104:35795 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
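The "Committing ... as ..." entries above are the second half of a two-phase flush: the HFile is first written under the family's .tmp directory, then renamed into the store directory so readers never observe a partially written file. A sketch of that commit step, assuming an already written tmp file (the helper is illustrative, not HRegionFileSystem's API):

```java
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative commit step: move a flushed HFile from .tmp into the store.
// The rename is atomic on HDFS, so the file appears fully formed or not at all.
static Path commitStoreFile(FileSystem fs, Path tmpFile, Path storeDir)
    throws IOException {
  Path dst = new Path(storeDir, tmpFile.getName());
  if (!fs.rename(tmpFile, dst)) {
    throw new IOException("Failed to commit " + tmpFile + " as " + dst);
  }
  return dst;
}
```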
2024-12-13T17:23:41,445 DEBUG [M:0;bcc0a2319104:35795 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-13T17:23:41,445 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/7128ac75-d9a3-a86f-d49a-df8f5939c426/MasterData/WALs/bcc0a2319104,35795,1734110532328 2024-12-13T17:23:41,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40799 is added to blk_1073741830_1006 (size=79272) 2024-12-13T17:23:41,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44197 is added to blk_1073741830_1006 (size=79272) 2024-12-13T17:23:41,447 INFO [M:0;bcc0a2319104:35795 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-13T17:23:41,447 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-13T17:23:41,447 INFO [M:0;bcc0a2319104:35795 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:35795 2024-12-13T17:23:41,556 DEBUG [M:0;bcc0a2319104:35795 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/bcc0a2319104,35795,1734110532328 already deleted, retry=false 2024-12-13T17:23:41,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-13T17:23:41,665 INFO [M:0;bcc0a2319104:35795 {}] regionserver.HRegionServer(1307): Exiting; stopping=bcc0a2319104,35795,1734110532328; zookeeper connection closed. 2024-12-13T17:23:41,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35795-0x1002069ef590000, quorum=127.0.0.1:51612, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-13T17:23:41,672 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5c032588{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-13T17:23:41,672 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@312ed15c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-13T17:23:41,672 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-13T17:23:41,672 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62dd1763{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-13T17:23:41,673 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@363ce62f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/hadoop.log.dir/,STOPPED} 2024-12-13T17:23:41,675 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-13T17:23:41,675 WARN [BP-1985376407-172.17.0.2-1734110530861 heartbeating to localhost/127.0.0.1:41263 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-13T17:23:41,675 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-13T17:23:41,675 WARN [BP-1985376407-172.17.0.2-1734110530861 heartbeating to localhost/127.0.0.1:41263 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1985376407-172.17.0.2-1734110530861 (Datanode Uuid 4f614c03-80b7-4739-8b89-5c9b71ab2255) service to localhost/127.0.0.1:41263 2024-12-13T17:23:41,676 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/cluster_97699885-1d95-5cf6-adbf-615e981655a5/dfs/data/data3/current/BP-1985376407-172.17.0.2-1734110530861 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-13T17:23:41,676 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/cluster_97699885-1d95-5cf6-adbf-615e981655a5/dfs/data/data4/current/BP-1985376407-172.17.0.2-1734110530861 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-13T17:23:41,676 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-13T17:23:41,678 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@47b1c855{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-13T17:23:41,678 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@472b36ec{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-13T17:23:41,678 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-13T17:23:41,678 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d7b6841{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-13T17:23:41,678 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@562ff434{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/hadoop.log.dir/,STOPPED} 2024-12-13T17:23:41,679 WARN [BP-1985376407-172.17.0.2-1734110530861 heartbeating to localhost/127.0.0.1:41263 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-13T17:23:41,679 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-13T17:23:41,679 WARN [BP-1985376407-172.17.0.2-1734110530861 heartbeating to localhost/127.0.0.1:41263 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1985376407-172.17.0.2-1734110530861 (Datanode Uuid df3967f5-01fd-4f13-bd55-d7f27bffe7a3) service to localhost/127.0.0.1:41263 2024-12-13T17:23:41,679 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-13T17:23:41,680 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/cluster_97699885-1d95-5cf6-adbf-615e981655a5/dfs/data/data1/current/BP-1985376407-172.17.0.2-1734110530861 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-13T17:23:41,680 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/cluster_97699885-1d95-5cf6-adbf-615e981655a5/dfs/data/data2/current/BP-1985376407-172.17.0.2-1734110530861 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-13T17:23:41,680 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-13T17:23:41,685 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@30af8f97{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-13T17:23:41,685 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5ecfeab9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-13T17:23:41,685 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-13T17:23:41,685 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4b21935c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-13T17:23:41,685 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@83c844a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/hadoop.log.dir/,STOPPED} 2024-12-13T17:23:41,692 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-13T17:23:41,718 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-13T17:23:41,724 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=133 (was 114) - Thread LEAK? -, OpenFileDescriptor=487 (was 464) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=118 (was 241), ProcessCount=11 (was 11), AvailableMemoryMB=3060 (was 2952) - AvailableMemoryMB LEAK? 
- 2024-12-13T17:23:41,729 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=134, OpenFileDescriptor=487, MaxFileDescriptor=1048576, SystemLoadAverage=118, ProcessCount=11, AvailableMemoryMB=3060 2024-12-13T17:23:41,730 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-13T17:23:41,730 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/hadoop.log.dir so I do NOT create it in target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce 2024-12-13T17:23:41,730 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e2714585-7905-9a1b-cbeb-1258ea2c2556/hadoop.tmp.dir so I do NOT create it in target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce 2024-12-13T17:23:41,730 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/cluster_2c19175c-c403-f264-ec98-b88bf95ebd3e, deleteOnExit=true 2024-12-13T17:23:41,730 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-13T17:23:41,730 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/test.cache.data in system properties and HBase conf 2024-12-13T17:23:41,730 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/hadoop.tmp.dir in system properties and HBase conf 2024-12-13T17:23:41,730 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/hadoop.log.dir in system properties and HBase conf 2024-12-13T17:23:41,730 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-13T17:23:41,730 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-13T17:23:41,730 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-13T17:23:41,730 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-13T17:23:41,730 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-13T17:23:41,730 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-13T17:23:41,731 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-13T17:23:41,731 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-13T17:23:41,731 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-13T17:23:41,731 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-13T17:23:41,731 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-13T17:23:41,731 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-13T17:23:41,731 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-13T17:23:41,731 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/nfs.dump.dir in system properties and HBase conf 2024-12-13T17:23:41,731 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/java.io.tmpdir in system properties and HBase conf 2024-12-13T17:23:41,731 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-13T17:23:41,731 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-13T17:23:41,731 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-13T17:23:41,742 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-13T17:23:41,823 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:23:41,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:41,950 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T17:23:41,953 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-13T17:23:41,954 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-13T17:23:41,954 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-13T17:23:41,954 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-13T17:23:41,955 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T17:23:41,955 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a7be2a7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/hadoop.log.dir/,AVAILABLE} 2024-12-13T17:23:41,956 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@438448c8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-13T17:23:42,037 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:23:42,046 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3c01f689{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/java.io.tmpdir/jetty-localhost-43179-hadoop-hdfs-3_4_1-tests_jar-_-any-4133908324799192833/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-13T17:23:42,047 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@51adf726{HTTP/1.1, (http/1.1)}{localhost:43179} 2024-12-13T17:23:42,047 INFO [Time-limited test {}] server.Server(415): Started @394774ms 2024-12-13T17:23:42,057 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-13T17:23:42,214 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T17:23:42,217 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-13T17:23:42,217 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-13T17:23:42,217 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-13T17:23:42,217 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-13T17:23:42,218 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@580042fd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/hadoop.log.dir/,AVAILABLE} 2024-12-13T17:23:42,218 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@495b4ebc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-13T17:23:42,308 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@75209dae{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/java.io.tmpdir/jetty-localhost-44359-hadoop-hdfs-3_4_1-tests_jar-_-any-7065878096591420427/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-13T17:23:42,309 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@54663214{HTTP/1.1, (http/1.1)}{localhost:44359} 2024-12-13T17:23:42,309 INFO [Time-limited test {}] server.Server(415): Started @395036ms 2024-12-13T17:23:42,310 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
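
The recurring "Failed invocation for hdfs://..." WARNs with InvocationTargetException are lease-recovery probes from the previous test racing its own cluster shutdown: RecoverLeaseFSUtils reaches isFileClosed reflectively, so the IOException("Filesystem closed") thrown by the already-closed DFSClient surfaces as the wrapped cause. A sketch of that probe, assuming a Hadoop FileSystem on the classpath; FileClosedProbeSketch is an illustrative wrapper, and the unwrap in the catch block is where the "Caused by: java.io.IOException: Filesystem closed" lines originate:

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Sketch of the reflective probe: look up isFileClosed(Path) on the
    // concrete FileSystem and invoke it, roughly as RecoverLeaseFSUtils
    // does to stay compatible with FileSystems that lack the method.
    public final class FileClosedProbeSketch {
        static boolean probeFileClosed(FileSystem fs, Path wal) {
            try {
                Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
                return (Boolean) isFileClosed.invoke(fs, wal);
            } catch (NoSuchMethodException | IllegalAccessException e) {
                return false; // no such probe on this FileSystem
            } catch (InvocationTargetException e) {
                // The real failure is the wrapped cause; on a closed client it is
                // java.io.IOException: Filesystem closed -- the "Caused by" above.
                System.err.println("WARN Failed invocation for " + wal + ": " + e.getCause());
                return false; // benign during teardown; the caller's retry gives up later
            }
        }
    }
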
2024-12-13T17:23:42,334 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-13T17:23:42,337 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-13T17:23:42,337 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-13T17:23:42,337 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-13T17:23:42,337 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-13T17:23:42,338 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7b0efac0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/hadoop.log.dir/,AVAILABLE} 2024-12-13T17:23:42,338 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56c8d5d7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-13T17:23:42,428 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@35f3950c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/java.io.tmpdir/jetty-localhost-38103-hadoop-hdfs-3_4_1-tests_jar-_-any-13808647807634015070/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-13T17:23:42,428 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7fa677e5{HTTP/1.1, (http/1.1)}{localhost:38103} 2024-12-13T17:23:42,428 INFO [Time-limited test {}] server.Server(415): Started @395155ms 2024-12-13T17:23:42,429 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-13T17:23:42,824 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:42,848 INFO [regionserver/bcc0a2319104:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-13T17:23:42,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-13T17:23:42,905 WARN [Thread-2259 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/cluster_2c19175c-c403-f264-ec98-b88bf95ebd3e/dfs/data/data1/current/BP-1824788747-172.17.0.2-1734110621754/current, will proceed with Du for space computation calculation, 2024-12-13T17:23:42,905 WARN [Thread-2260 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/cluster_2c19175c-c403-f264-ec98-b88bf95ebd3e/dfs/data/data2/current/BP-1824788747-172.17.0.2-1734110621754/current, will proceed with Du for space computation calculation, 2024-12-13T17:23:42,920 WARN [Thread-2223 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-13T17:23:42,922 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4ec4ab680b27e36d with lease ID 0xab5adfa5ab1c719e: Processing first storage report for DS-a019c084-d5de-4bf4-b523-2bb5a5c68f9c from datanode DatanodeRegistration(127.0.0.1:41565, datanodeUuid=5a29cb8b-501c-4da3-9998-8f597639f0bf, infoPort=37595, infoSecurePort=0, ipcPort=33089, storageInfo=lv=-57;cid=testClusterID;nsid=1372362305;c=1734110621754) 2024-12-13T17:23:42,922 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4ec4ab680b27e36d with lease ID 0xab5adfa5ab1c719e: from storage DS-a019c084-d5de-4bf4-b523-2bb5a5c68f9c node DatanodeRegistration(127.0.0.1:41565, datanodeUuid=5a29cb8b-501c-4da3-9998-8f597639f0bf, infoPort=37595, infoSecurePort=0, ipcPort=33089, storageInfo=lv=-57;cid=testClusterID;nsid=1372362305;c=1734110621754), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-13T17:23:42,922 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4ec4ab680b27e36d with lease ID 0xab5adfa5ab1c719e: Processing first storage report for DS-4239413c-9ac8-451b-8ed0-86adb5a6e824 from datanode DatanodeRegistration(127.0.0.1:41565, datanodeUuid=5a29cb8b-501c-4da3-9998-8f597639f0bf, infoPort=37595, infoSecurePort=0, ipcPort=33089, storageInfo=lv=-57;cid=testClusterID;nsid=1372362305;c=1734110621754) 2024-12-13T17:23:42,922 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4ec4ab680b27e36d with lease ID 0xab5adfa5ab1c719e: from storage DS-4239413c-9ac8-451b-8ed0-86adb5a6e824 node DatanodeRegistration(127.0.0.1:41565, datanodeUuid=5a29cb8b-501c-4da3-9998-8f597639f0bf, infoPort=37595, infoSecurePort=0, ipcPort=33089, storageInfo=lv=-57;cid=testClusterID;nsid=1372362305;c=1734110621754), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-13T17:23:43,037 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-13T17:23:43,043 WARN [Thread-2271 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/cluster_2c19175c-c403-f264-ec98-b88bf95ebd3e/dfs/data/data4/current/BP-1824788747-172.17.0.2-1734110621754/current, will proceed with Du for space computation calculation, 2024-12-13T17:23:43,043 WARN [Thread-2270 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/cluster_2c19175c-c403-f264-ec98-b88bf95ebd3e/dfs/data/data3/current/BP-1824788747-172.17.0.2-1734110621754/current, will proceed with Du for space computation calculation, 2024-12-13T17:23:43,066 WARN [Thread-2246 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-13T17:23:43,068 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc98772c4075b852f with lease ID 0xab5adfa5ab1c719f: Processing first storage report for DS-93a2eaf0-85f6-4f44-9f3d-008b0fb8bd30 from datanode DatanodeRegistration(127.0.0.1:37433, datanodeUuid=0932d7b0-18b1-4a78-be83-f53f812d705b, infoPort=43781, infoSecurePort=0, ipcPort=33695, storageInfo=lv=-57;cid=testClusterID;nsid=1372362305;c=1734110621754) 2024-12-13T17:23:43,068 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc98772c4075b852f with lease ID 0xab5adfa5ab1c719f: from storage DS-93a2eaf0-85f6-4f44-9f3d-008b0fb8bd30 node DatanodeRegistration(127.0.0.1:37433, datanodeUuid=0932d7b0-18b1-4a78-be83-f53f812d705b, infoPort=43781, infoSecurePort=0, ipcPort=33695, storageInfo=lv=-57;cid=testClusterID;nsid=1372362305;c=1734110621754), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-13T17:23:43,068 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc98772c4075b852f with lease ID 0xab5adfa5ab1c719f: Processing first storage report for DS-5591c627-8f17-4008-9b9e-3e8fe69bbd22 from datanode DatanodeRegistration(127.0.0.1:37433, datanodeUuid=0932d7b0-18b1-4a78-be83-f53f812d705b, infoPort=43781, infoSecurePort=0, ipcPort=33695, storageInfo=lv=-57;cid=testClusterID;nsid=1372362305;c=1734110621754) 2024-12-13T17:23:43,068 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc98772c4075b852f with lease ID 0xab5adfa5ab1c719f: from storage DS-5591c627-8f17-4008-9b9e-3e8fe69bbd22 node DatanodeRegistration(127.0.0.1:37433, datanodeUuid=0932d7b0-18b1-4a78-be83-f53f812d705b, infoPort=43781, infoSecurePort=0, ipcPort=33695, storageInfo=lv=-57;cid=testClusterID;nsid=1372362305;c=1734110621754), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-13T17:23:43,156 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce 2024-12-13T17:23:43,157 DEBUG [Time-limited test {}] zookeeper.MiniZooKeeperCluster(244): Failed binding ZK Server to client port: 60058 java.net.BindException: Address already in use at sun.nio.ch.Net.bind0(Native Method) ~[?:?] at sun.nio.ch.Net.bind(Net.java:555) ~[?:?] at sun.nio.ch.ServerSocketChannelImpl.netBind(ServerSocketChannelImpl.java:337) ~[?:?] at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:294) ~[?:?] at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:89) ~[?:?] at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:81) ~[?:?] 
at org.apache.zookeeper.server.NIOServerCnxnFactory.configure(NIOServerCnxnFactory.java:662) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.server.ServerCnxnFactory.configure(ServerCnxnFactory.java:109) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.server.ServerCnxnFactory.configure(ServerCnxnFactory.java:105) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster.startup(MiniZooKeeperCluster.java:240) ~[hbase-zookeeper-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.HBaseZKTestingUtility.startMiniZKCluster(HBaseZKTestingUtility.java:129) ~[hbase-zookeeper-2.7.0-SNAPSHOT-tests.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.HBaseZKTestingUtility.startMiniZKCluster(HBaseZKTestingUtility.java:102) ~[hbase-zookeeper-2.7.0-SNAPSHOT-tests.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtility.startMiniCluster(HBaseTestingUtility.java:1148) ~[test-classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.setUp(AbstractTestLogRolling.java:150) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.invokeMethod(RunBefores.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:24) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-13T17:23:43,162 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/cluster_2c19175c-c403-f264-ec98-b88bf95ebd3e/zookeeper_0, clientPort=60059, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/cluster_2c19175c-c403-f264-ec98-b88bf95ebd3e/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/cluster_2c19175c-c403-f264-ec98-b88bf95ebd3e/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-13T17:23:43,163 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=60059 2024-12-13T17:23:43,163 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T17:23:43,164 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T17:23:43,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41565 is added to blk_1073741825_1001 (size=7) 2024-12-13T17:23:43,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37433 is added to blk_1073741825_1001 (size=7) 2024-12-13T17:23:43,172 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160 with version=8 2024-12-13T17:23:43,172 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:32953/user/jenkins/test-data/c6880846-ed59-ea09-b7a2-b9aadca6c56e/hbase-staging 2024-12-13T17:23:43,174 INFO [Time-limited test {}] client.ConnectionUtils(129): master/bcc0a2319104:0 server-side Connection retries=45 2024-12-13T17:23:43,174 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-13T17:23:43,174 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-13T17:23:43,175 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-13T17:23:43,175 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-13T17:23:43,175 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-13T17:23:43,175 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-13T17:23:43,175 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-13T17:23:43,175 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:41755 2024-12-13T17:23:43,176 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T17:23:43,177 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T17:23:43,179 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:41755 connecting to ZooKeeper ensemble=127.0.0.1:60059 2024-12-13T17:23:43,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:417550x0, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-13T17:23:43,230 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41755-0x100206b52390000 connected 2024-12-13T17:23:43,513 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-13T17:23:43,515 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T17:23:43,517 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-13T17:23:43,519 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41755 2024-12-13T17:23:43,519 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41755 2024-12-13T17:23:43,519 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41755 2024-12-13T17:23:43,523 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41755 2024-12-13T17:23:43,524 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41755 2024-12-13T17:23:43,524 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160, hbase.cluster.distributed=false 2024-12-13T17:23:43,538 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/bcc0a2319104:0 
server-side Connection retries=45 2024-12-13T17:23:43,538 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-13T17:23:43,539 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-13T17:23:43,539 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-13T17:23:43,539 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-13T17:23:43,539 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-13T17:23:43,539 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-13T17:23:43,539 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-13T17:23:43,539 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:41323 2024-12-13T17:23:43,540 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-13T17:23:43,540 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-13T17:23:43,541 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T17:23:43,542 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-13T17:23:43,544 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:41323 connecting to ZooKeeper ensemble=127.0.0.1:60059 2024-12-13T17:23:43,554 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:413230x0, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-13T17:23:43,555 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41323-0x100206b52390001 connected 2024-12-13T17:23:43,555 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41323-0x100206b52390001, quorum=127.0.0.1:60059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-13T17:23:43,556 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41323-0x100206b52390001, quorum=127.0.0.1:60059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-13T17:23:43,556 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41323-0x100206b52390001, quorum=127.0.0.1:60059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 
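
The "Set watcher on znode that does not yet exist" DEBUG lines above reflect ZooKeeper's exists() semantics: the call returns null for an absent znode but still registers a one-shot watch, so the later creation of /hbase/master or /hbase/running fires a NodeCreated event (visible further down in this log). A minimal sketch against the plain ZooKeeper 3.8 client, with the connect string and znode path copied from the log and the watcher bodies illustrative:

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public final class ExistsWatchSketch {
        public static void main(String[] args) throws Exception {
            CountDownLatch connected = new CountDownLatch(1);
            ZooKeeper zk = new ZooKeeper("127.0.0.1:60059", 30_000,
                (WatchedEvent e) -> connected.countDown());
            connected.await();

            // exists() returns null for an absent znode but still leaves a
            // one-shot watch behind -- the "watcher on znode that does not
            // yet exist" pattern. A later create of /hbase/master delivers
            // a NodeCreated event to the watcher.
            Stat stat = zk.exists("/hbase/master",
                (WatchedEvent e) -> System.out.println("event: " + e.getType()));
            System.out.println("/hbase/master present? " + (stat != null));
            zk.close();
        }
    }
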
2024-12-13T17:23:43,557 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41323
2024-12-13T17:23:43,557 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41323
2024-12-13T17:23:43,557 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41323
2024-12-13T17:23:43,558 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41323
2024-12-13T17:23:43,558 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41323
2024-12-13T17:23:43,559 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/bcc0a2319104,41755,1734110623174
2024-12-13T17:23:43,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-13T17:23:43,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41323-0x100206b52390001, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-13T17:23:43,565 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/bcc0a2319104,41755,1734110623174
2024-12-13T17:23:43,571 DEBUG [M:0;bcc0a2319104:41755 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;bcc0a2319104:41755
2024-12-13T17:23:43,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41323-0x100206b52390001, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-13T17:23:43,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-13T17:23:43,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:23:43,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41323-0x100206b52390001, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:23:43,573 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-13T17:23:43,573 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/bcc0a2319104,41755,1734110623174 from backup master directory
2024-12-13T17:23:43,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41323-0x100206b52390001, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-13T17:23:43,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/bcc0a2319104,41755,1734110623174
2024-12-13T17:23:43,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-13T17:23:43,581 WARN [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-13T17:23:43,581 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=bcc0a2319104,41755,1734110623174
2024-12-13T17:23:43,581 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-13T17:23:43,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37433 is added to blk_1073741826_1002 (size=42)
2024-12-13T17:23:43,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41565 is added to blk_1073741826_1002 (size=42)
2024-12-13T17:23:43,591 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/hbase.id with ID: b5b818e4-33a0-41aa-9535-2febae73ffe8
2024-12-13T17:23:43,601 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-13T17:23:43,613 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:23:43,613 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41323-0x100206b52390001, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:23:43,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41565 is added to blk_1073741827_1003 (size=196)
2024-12-13T17:23:43,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37433 is added to blk_1073741827_1003 (size=196)
2024-12-13T17:23:43,622 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-13T17:23:43,623 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-13T17:23:43,623 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-13T17:23:43,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41565 is added to blk_1073741828_1004 (size=1189)
2024-12-13T17:23:43,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37433 is added to blk_1073741828_1004 (size=1189)
2024-12-13T17:23:43,635 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/MasterData/data/master/store
2024-12-13T17:23:43,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41565 is added to blk_1073741829_1005 (size=34)
2024-12-13T17:23:43,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37433 is added to blk_1073741829_1005 (size=34)
2024-12-13T17:23:43,641 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:23:43,641 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-13T17:23:43,641 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-13T17:23:43,641 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-13T17:23:43,641 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-13T17:23:43,641 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-13T17:23:43,641 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-13T17:23:43,641 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-13T17:23:43,642 WARN [master/bcc0a2319104:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/MasterData/data/master/store/.initializing
2024-12-13T17:23:43,642 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/MasterData/WALs/bcc0a2319104,41755,1734110623174
2024-12-13T17:23:43,644 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bcc0a2319104%2C41755%2C1734110623174, suffix=, logDir=hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/MasterData/WALs/bcc0a2319104,41755,1734110623174, archiveDir=hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/MasterData/oldWALs, maxLogs=10
2024-12-13T17:23:43,644 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C41755%2C1734110623174.1734110623644
2024-12-13T17:23:43,649 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/MasterData/WALs/bcc0a2319104,41755,1734110623174/bcc0a2319104%2C41755%2C1734110623174.1734110623644
2024-12-13T17:23:43,649 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43781:43781),(127.0.0.1/127.0.0.1:37595:37595)]
2024-12-13T17:23:43,649 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-12-13T17:23:43,649 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:23:43,649 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-12-13T17:23:43,649 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-12-13T17:23:43,650 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-12-13T17:23:43,651 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-12-13T17:23:43,651 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:23:43,652 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-13T17:23:43,652 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-12-13T17:23:43,653 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-12-13T17:23:43,653 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:23:43,653 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-13T17:23:43,653 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-12-13T17:23:43,654 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-12-13T17:23:43,654 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:23:43,655 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-13T17:23:43,655 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-12-13T17:23:43,656 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-12-13T17:23:43,656 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:23:43,656 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-13T17:23:43,657 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-13T17:23:43,657 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-13T17:23:43,658 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-12-13T17:23:43,659 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-12-13T17:23:43,661 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-13T17:23:43,661 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=694482, jitterRate=-0.11692069470882416}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-12-13T17:23:43,661 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-13T17:23:43,662 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-12-13T17:23:43,664 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@304ea0a2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-13T17:23:43,665 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating...
2024-12-13T17:23:43,665 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-12-13T17:23:43,665 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-12-13T17:23:43,665 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-12-13T17:23:43,665 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec
2024-12-13T17:23:43,666 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec
2024-12-13T17:23:43,666 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-12-13T17:23:43,668 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
2024-12-13T17:23:43,669 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-12-13T17:23:43,679 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false
2024-12-13T17:23:43,679 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-12-13T17:23:43,680 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-12-13T17:23:43,689 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false
2024-12-13T17:23:43,690 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-12-13T17:23:43,691 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-12-13T17:23:43,698 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false
2024-12-13T17:23:43,698 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-12-13T17:23:43,706 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false
2024-12-13T17:23:43,707 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-12-13T17:23:43,714 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-12-13T17:23:43,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41323-0x100206b52390001, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-13T17:23:43,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41323-0x100206b52390001, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:23:43,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-13T17:23:43,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:23:43,723 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=bcc0a2319104,41755,1734110623174, sessionid=0x100206b52390000, setting cluster-up flag (Was=false)
2024-12-13T17:23:43,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:23:43,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41323-0x100206b52390001, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:23:43,764 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-12-13T17:23:43,766 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bcc0a2319104,41755,1734110623174
2024-12-13T17:23:43,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41323-0x100206b52390001, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:23:43,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:23:43,806 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-12-13T17:23:43,808 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bcc0a2319104,41755,1734110623174
2024-12-13T17:23:43,813 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta
2024-12-13T17:23:43,813 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2
2024-12-13T17:23:43,813 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-12-13T17:23:43,814 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: bcc0a2319104,41755,1734110623174 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
2024-12-13T17:23:43,814 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/bcc0a2319104:0, corePoolSize=5, maxPoolSize=5
2024-12-13T17:23:43,814 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/bcc0a2319104:0, corePoolSize=5, maxPoolSize=5
2024-12-13T17:23:43,814 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/bcc0a2319104:0, corePoolSize=5, maxPoolSize=5
2024-12-13T17:23:43,814 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/bcc0a2319104:0, corePoolSize=5, maxPoolSize=5
2024-12-13T17:23:43,814 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/bcc0a2319104:0, corePoolSize=10, maxPoolSize=10
2024-12-13T17:23:43,814 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:23:43,815 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/bcc0a2319104:0, corePoolSize=2, maxPoolSize=2
2024-12-13T17:23:43,815 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:23:43,816 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1734110653816
2024-12-13T17:23:43,816 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-12-13T17:23:43,817 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-12-13T17:23:43,817 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-12-13T17:23:43,817 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-12-13T17:23:43,817 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-12-13T17:23:43,817 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-12-13T17:23:43,817 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta
2024-12-13T17:23:43,817 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region
2024-12-13T17:23:43,817 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-13T17:23:43,818 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-12-13T17:23:43,818 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-12-13T17:23:43,818 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-12-13T17:23:43,819 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-12-13T17:23:43,819 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-12-13T17:23:43,819 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.large.0-1734110623819,5,FailOnTimeoutGroup]
2024-12-13T17:23:43,819 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:23:43,819 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.small.0-1734110623819,5,FailOnTimeoutGroup]
2024-12-13T17:23:43,819 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-13T17:23:43,819 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-13T17:23:43,819 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-12-13T17:23:43,819 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-12-13T17:23:43,819 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
2024-12-13T17:23:43,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41565 is added to blk_1073741831_1007 (size=1039)
2024-12-13T17:23:43,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37433 is added to blk_1073741831_1007 (size=1039)
2024-12-13T17:23:43,825 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:23:43,825 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039
2024-12-13T17:23:43,825 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160
2024-12-13T17:23:43,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41565 is added to blk_1073741832_1008 (size=32)
2024-12-13T17:23:43,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37433 is added to blk_1073741832_1008 (size=32)
2024-12-13T17:23:43,831 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:23:43,832 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-13T17:23:43,834 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-13T17:23:43,834 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:23:43,834 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-13T17:23:43,834 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-13T17:23:43,835 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-13T17:23:43,835 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:23:43,836 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-13T17:23:43,836 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-13T17:23:43,837 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-13T17:23:43,837 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:23:43,837 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-13T17:23:43,838 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/data/hbase/meta/1588230740
2024-12-13T17:23:43,838 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/data/hbase/meta/1588230740
2024-12-13T17:23:43,840 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-13T17:23:43,841 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-13T17:23:43,843 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-13T17:23:43,843 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=782716, jitterRate=-0.0047251880168914795}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-13T17:23:43,843 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-12-13T17:23:43,844 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-13T17:23:43,844 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-13T17:23:43,844 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-13T17:23:43,844 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-13T17:23:43,844 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-13T17:23:43,844 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-13T17:23:43,844 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-13T17:23:43,845 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta
2024-12-13T17:23:43,845 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta
2024-12-13T17:23:43,845 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-12-13T17:23:43,846 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-13T17:23:43,846 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-12-13T17:23:43,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-13T17:23:43,873 DEBUG [RS:0;bcc0a2319104:41323 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;bcc0a2319104:41323
2024-12-13T17:23:43,874 INFO [RS:0;bcc0a2319104:41323 {}] regionserver.HRegionServer(1008): ClusterId : b5b818e4-33a0-41aa-9535-2febae73ffe8
2024-12-13T17:23:43,874 DEBUG [RS:0;bcc0a2319104:41323 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-12-13T17:23:43,882 DEBUG [RS:0;bcc0a2319104:41323 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-12-13T17:23:43,882 DEBUG [RS:0;bcc0a2319104:41323 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-12-13T17:23:43,890 DEBUG [RS:0;bcc0a2319104:41323 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-12-13T17:23:43,890 DEBUG [RS:0;bcc0a2319104:41323 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25aed86, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-13T17:23:43,891 DEBUG [RS:0;bcc0a2319104:41323 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32d926de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bcc0a2319104/172.17.0.2:0
2024-12-13T17:23:43,891 INFO [RS:0;bcc0a2319104:41323 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled
2024-12-13T17:23:43,891 INFO [RS:0;bcc0a2319104:41323 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled
2024-12-13T17:23:43,891 DEBUG [RS:0;bcc0a2319104:41323 {}] regionserver.HRegionServer(1090): About to register with Master.
2024-12-13T17:23:43,892 INFO [RS:0;bcc0a2319104:41323 {}] regionserver.HRegionServer(3073): reportForDuty to master=bcc0a2319104,41755,1734110623174 with isa=bcc0a2319104/172.17.0.2:41323, startcode=1734110623538
2024-12-13T17:23:43,892 DEBUG [RS:0;bcc0a2319104:41323 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-13T17:23:43,894 INFO [RS-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47721, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService
2024-12-13T17:23:43,895 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41755 {}] master.ServerManager(332): Checking decommissioned status of RegionServer bcc0a2319104,41323,1734110623538
2024-12-13T17:23:43,895 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41755 {}] master.ServerManager(486): Registering regionserver=bcc0a2319104,41323,1734110623538
2024-12-13T17:23:43,896 DEBUG [RS:0;bcc0a2319104:41323 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160
2024-12-13T17:23:43,896 DEBUG [RS:0;bcc0a2319104:41323 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:34849
2024-12-13T17:23:43,896 DEBUG [RS:0;bcc0a2319104:41323 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1
2024-12-13T17:23:43,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-13T17:23:43,907 DEBUG [RS:0;bcc0a2319104:41323 {}] zookeeper.ZKUtil(111): regionserver:41323-0x100206b52390001, quorum=127.0.0.1:60059, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/bcc0a2319104,41323,1734110623538
2024-12-13T17:23:43,907 WARN [RS:0;bcc0a2319104:41323 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-13T17:23:43,907 INFO [RS:0;bcc0a2319104:41323 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-13T17:23:43,907 DEBUG [RS:0;bcc0a2319104:41323 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/WALs/bcc0a2319104,41323,1734110623538
2024-12-13T17:23:43,907 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [bcc0a2319104,41323,1734110623538]
2024-12-13T17:23:43,912 DEBUG [RS:0;bcc0a2319104:41323 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds
2024-12-13T17:23:43,913 INFO [RS:0;bcc0a2319104:41323 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-12-13T17:23:43,915 INFO [RS:0;bcc0a2319104:41323 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-12-13T17:23:43,916 INFO [RS:0;bcc0a2319104:41323 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-13T17:23:43,916 INFO [RS:0;bcc0a2319104:41323 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-13T17:23:43,916 INFO [RS:0;bcc0a2319104:41323 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S
2024-12-13T17:23:43,918 INFO [RS:0;bcc0a2319104:41323 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-12-13T17:23:43,918 DEBUG [RS:0;bcc0a2319104:41323 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:23:43,918 DEBUG [RS:0;bcc0a2319104:41323 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:23:43,918 DEBUG [RS:0;bcc0a2319104:41323 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:23:43,918 DEBUG [RS:0;bcc0a2319104:41323 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:23:43,918 DEBUG [RS:0;bcc0a2319104:41323 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:23:43,918 DEBUG [RS:0;bcc0a2319104:41323 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/bcc0a2319104:0, corePoolSize=2, maxPoolSize=2
2024-12-13T17:23:43,919 DEBUG [RS:0;bcc0a2319104:41323 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:23:43,919 DEBUG [RS:0;bcc0a2319104:41323 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:23:43,919 DEBUG [RS:0;bcc0a2319104:41323 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:23:43,919 DEBUG [RS:0;bcc0a2319104:41323 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:23:43,919 DEBUG [RS:0;bcc0a2319104:41323 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/bcc0a2319104:0, corePoolSize=1, maxPoolSize=1
2024-12-13T17:23:43,919 DEBUG [RS:0;bcc0a2319104:41323 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/bcc0a2319104:0, corePoolSize=3, maxPoolSize=3
2024-12-13T17:23:43,919 DEBUG [RS:0;bcc0a2319104:41323 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/bcc0a2319104:0, corePoolSize=3, maxPoolSize=3
2024-12-13T17:23:43,920 INFO [RS:0;bcc0a2319104:41323 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-12-13T17:23:43,920 INFO [RS:0;bcc0a2319104:41323 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-12-13T17:23:43,920 INFO [RS:0;bcc0a2319104:41323 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-12-13T17:23:43,920 INFO [RS:0;bcc0a2319104:41323 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-12-13T17:23:43,920 INFO [RS:0;bcc0a2319104:41323 {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,41323,1734110623538-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-13T17:23:43,936 INFO [RS:0;bcc0a2319104:41323 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-12-13T17:23:43,936 INFO [RS:0;bcc0a2319104:41323 {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,41323,1734110623538-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-13T17:23:43,947 INFO [RS:0;bcc0a2319104:41323 {}] regionserver.Replication(204): bcc0a2319104,41323,1734110623538 started
2024-12-13T17:23:43,947 INFO [RS:0;bcc0a2319104:41323 {}] regionserver.HRegionServer(1767): Serving as bcc0a2319104,41323,1734110623538, RpcServer on bcc0a2319104/172.17.0.2:41323, sessionid=0x100206b52390001
2024-12-13T17:23:43,947 DEBUG [RS:0;bcc0a2319104:41323 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-12-13T17:23:43,947 DEBUG [RS:0;bcc0a2319104:41323 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager bcc0a2319104,41323,1734110623538
2024-12-13T17:23:43,947 DEBUG [RS:0;bcc0a2319104:41323 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bcc0a2319104,41323,1734110623538'
2024-12-13T17:23:43,947 DEBUG [RS:0;bcc0a2319104:41323 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-12-13T17:23:43,948 DEBUG [RS:0;bcc0a2319104:41323 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-12-13T17:23:43,948 DEBUG [RS:0;bcc0a2319104:41323 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-12-13T17:23:43,948 DEBUG [RS:0;bcc0a2319104:41323 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-12-13T17:23:43,948 DEBUG [RS:0;bcc0a2319104:41323 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager bcc0a2319104,41323,1734110623538
2024-12-13T17:23:43,948 DEBUG [RS:0;bcc0a2319104:41323 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bcc0a2319104,41323,1734110623538'
2024-12-13T17:23:43,948 DEBUG [RS:0;bcc0a2319104:41323 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-12-13T17:23:43,948 DEBUG [RS:0;bcc0a2319104:41323 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-13T17:23:43,949 DEBUG [RS:0;bcc0a2319104:41323 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-13T17:23:43,949 INFO [RS:0;bcc0a2319104:41323 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-13T17:23:43,949 INFO [RS:0;bcc0a2319104:41323 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-12-13T17:23:43,997 WARN [bcc0a2319104:41755 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions.
2024-12-13T17:23:44,019 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace
2024-12-13T17:23:44,020 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling
2024-12-13T17:23:44,038 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:23:44,052 INFO [RS:0;bcc0a2319104:41323 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bcc0a2319104%2C41323%2C1734110623538, suffix=, logDir=hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/WALs/bcc0a2319104,41323,1734110623538, archiveDir=hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/oldWALs, maxLogs=32
2024-12-13T17:23:44,053 INFO [RS:0;bcc0a2319104:41323 {}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C41323%2C1734110623538.1734110624053
2024-12-13T17:23:44,064 INFO [RS:0;bcc0a2319104:41323 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/WALs/bcc0a2319104,41323,1734110623538/bcc0a2319104%2C41323%2C1734110623538.1734110624053
2024-12-13T17:23:44,064 DEBUG [RS:0;bcc0a2319104:41323 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37595:37595),(127.0.0.1/127.0.0.1:43781:43781)]
2024-12-13T17:23:44,247 DEBUG [bcc0a2319104:41755 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-12-13T17:23:44,247 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=bcc0a2319104,41323,1734110623538
2024-12-13T17:23:44,248 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bcc0a2319104,41323,1734110623538, state=OPENING
2024-12-13T17:23:44,290 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-12-13T17:23:44,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:23:44,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41323-0x100206b52390001, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:23:44,299 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=bcc0a2319104,41323,1734110623538}]
2024-12-13T17:23:44,299 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-13T17:23:44,299 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-13T17:23:44,455 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bcc0a2319104,41323,1734110623538
2024-12-13T17:23:44,455 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-13T17:23:44,460 INFO [RS-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40670, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-13T17:23:44,466 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740
2024-12-13T17:23:44,467 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-13T17:23:44,469 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bcc0a2319104%2C41323%2C1734110623538.meta, suffix=.meta, logDir=hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/WALs/bcc0a2319104,41323,1734110623538, archiveDir=hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/oldWALs, maxLogs=32
2024-12-13T17:23:44,470 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor bcc0a2319104%2C41323%2C1734110623538.meta.1734110624470.meta
2024-12-13T17:23:44,475 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/WALs/bcc0a2319104,41323,1734110623538/bcc0a2319104%2C41323%2C1734110623538.meta.1734110624470.meta
2024-12-13T17:23:44,475 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37595:37595),(127.0.0.1/127.0.0.1:43781:43781)]
2024-12-13T17:23:44,475 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-12-13T17:23:44,476 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-12-13T17:23:44,476 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-12-13T17:23:44,476 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
2024-12-13T17:23:44,476 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-12-13T17:23:44,476 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:23:44,476 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740
2024-12-13T17:23:44,476 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740
2024-12-13T17:23:44,477 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-13T17:23:44,478 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-13T17:23:44,478 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:23:44,479 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-13T17:23:44,479 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-13T17:23:44,480 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-13T17:23:44,480 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:23:44,480 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-13T17:23:44,480 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-13T17:23:44,481 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-13T17:23:44,481 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:23:44,481 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-13T17:23:44,482 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/data/hbase/meta/1588230740
2024-12-13T17:23:44,483 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/data/hbase/meta/1588230740
2024-12-13T17:23:44,484 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-13T17:23:44,485 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-13T17:23:44,486 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=805633, jitterRate=0.024416178464889526}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-13T17:23:44,486 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-12-13T17:23:44,487 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1734110624455
2024-12-13T17:23:44,488 DEBUG [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740
2024-12-13T17:23:44,488 INFO [RS_OPEN_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740
2024-12-13T17:23:44,489 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=bcc0a2319104,41323,1734110623538
2024-12-13T17:23:44,489 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bcc0a2319104,41323,1734110623538, state=OPEN
2024-12-13T17:23:44,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41323-0x100206b52390001, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-13T17:23:44,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-13T17:23:44,514 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-13T17:23:44,514 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-13T17:23:44,516 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2
2024-12-13T17:23:44,516 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=bcc0a2319104,41323,1734110623538 in 214 msec
2024-12-13T17:23:44,518 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1
2024-12-13T17:23:44,518 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 671 msec
2024-12-13T17:23:44,520 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 708 msec
2024-12-13T17:23:44,520 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1734110624520, completionTime=-1
2024-12-13T17:23:44,520 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running
2024-12-13T17:23:44,520 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster...
2024-12-13T17:23:44,521 DEBUG [hconnection-0x723ed6b2-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-13T17:23:44,522 INFO [RS-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40676, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-13T17:23:44,523 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1
2024-12-13T17:23:44,523 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1734110684523
2024-12-13T17:23:44,524 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1734110744524
2024-12-13T17:23:44,524 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 3 msec
2024-12-13T17:23:44,548 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,41755,1734110623174-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-13T17:23:44,548 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,41755,1734110623174-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-13T17:23:44,548 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,41755,1734110623174-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-13T17:23:44,548 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-bcc0a2319104:41755, period=300000, unit=MILLISECONDS is enabled.
2024-12-13T17:23:44,548 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-12-13T17:23:44,549 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating...
2024-12-13T17:23:44,549 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-13T17:23:44,550 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace
2024-12-13T17:23:44,550 DEBUG [master/bcc0a2319104:0.Chore.1 {}] janitor.CatalogJanitor(179):
2024-12-13T17:23:44,551 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION
2024-12-13T17:23:44,551 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:23:44,553 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-13T17:23:44,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37433 is added to blk_1073741835_1011 (size=358)
2024-12-13T17:23:44,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41565 is added to blk_1073741835_1011 (size=358)
2024-12-13T17:23:44,566 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 1474b9bab6798f04753b892b73c9626e, NAME => 'hbase:namespace,,1734110624549.1474b9bab6798f04753b892b73c9626e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160
2024-12-13T17:23:44,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41565 is added to blk_1073741836_1012 (size=42)
2024-12-13T17:23:44,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37433 is added to blk_1073741836_1012 (size=42)
2024-12-13T17:23:44,576 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734110624549.1474b9bab6798f04753b892b73c9626e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:23:44,576 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 1474b9bab6798f04753b892b73c9626e, disabling compactions & flushes
2024-12-13T17:23:44,576 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734110624549.1474b9bab6798f04753b892b73c9626e.
2024-12-13T17:23:44,576 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734110624549.1474b9bab6798f04753b892b73c9626e.
2024-12-13T17:23:44,577 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734110624549.1474b9bab6798f04753b892b73c9626e. after waiting 0 ms
2024-12-13T17:23:44,577 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734110624549.1474b9bab6798f04753b892b73c9626e.
2024-12-13T17:23:44,577 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1734110624549.1474b9bab6798f04753b892b73c9626e.
2024-12-13T17:23:44,577 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 1474b9bab6798f04753b892b73c9626e:
2024-12-13T17:23:44,578 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META
2024-12-13T17:23:44,578 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1734110624549.1474b9bab6798f04753b892b73c9626e.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1734110624578"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734110624578"}]},"ts":"1734110624578"}
2024-12-13T17:23:44,580 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta.
2024-12-13T17:23:44,580 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-13T17:23:44,581 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734110624580"}]},"ts":"1734110624580"}
2024-12-13T17:23:44,582 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta
2024-12-13T17:23:44,598 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=1474b9bab6798f04753b892b73c9626e, ASSIGN}]
2024-12-13T17:23:44,599 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=1474b9bab6798f04753b892b73c9626e, ASSIGN
2024-12-13T17:23:44,600 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=1474b9bab6798f04753b892b73c9626e, ASSIGN; state=OFFLINE, location=bcc0a2319104,41323,1734110623538; forceNewPlan=false, retain=false
2024-12-13T17:23:44,751 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=1474b9bab6798f04753b892b73c9626e, regionState=OPENING, regionLocation=bcc0a2319104,41323,1734110623538
2024-12-13T17:23:44,755 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 1474b9bab6798f04753b892b73c9626e, server=bcc0a2319104,41323,1734110623538}]
2024-12-13T17:23:44,825 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.meta.1734110348445.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:23:44,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/WALs/bcc0a2319104,43669,1734110347446/bcc0a2319104%2C43669%2C1734110347446.1734110348078
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:23:44,912 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bcc0a2319104,41323,1734110623538
2024-12-13T17:23:44,920 INFO [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1734110624549.1474b9bab6798f04753b892b73c9626e.
2024-12-13T17:23:44,920 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 1474b9bab6798f04753b892b73c9626e, NAME => 'hbase:namespace,,1734110624549.1474b9bab6798f04753b892b73c9626e.', STARTKEY => '', ENDKEY => ''}
2024-12-13T17:23:44,921 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 1474b9bab6798f04753b892b73c9626e
2024-12-13T17:23:44,921 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734110624549.1474b9bab6798f04753b892b73c9626e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-13T17:23:44,921 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 1474b9bab6798f04753b892b73c9626e
2024-12-13T17:23:44,921 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 1474b9bab6798f04753b892b73c9626e
2024-12-13T17:23:44,922 INFO [StoreOpener-1474b9bab6798f04753b892b73c9626e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1474b9bab6798f04753b892b73c9626e
2024-12-13T17:23:44,924 INFO [StoreOpener-1474b9bab6798f04753b892b73c9626e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1474b9bab6798f04753b892b73c9626e columnFamilyName info
2024-12-13T17:23:44,924 DEBUG [StoreOpener-1474b9bab6798f04753b892b73c9626e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-13T17:23:44,925 INFO [StoreOpener-1474b9bab6798f04753b892b73c9626e-1 {}] regionserver.HStore(327): Store=1474b9bab6798f04753b892b73c9626e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-13T17:23:44,925 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/data/hbase/namespace/1474b9bab6798f04753b892b73c9626e
2024-12-13T17:23:44,926 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/data/hbase/namespace/1474b9bab6798f04753b892b73c9626e
2024-12-13T17:23:44,928 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 1474b9bab6798f04753b892b73c9626e
2024-12-13T17:23:44,931 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/data/hbase/namespace/1474b9bab6798f04753b892b73c9626e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-13T17:23:44,931 INFO [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 1474b9bab6798f04753b892b73c9626e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=735717, jitterRate=-0.06448806822299957}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-13T17:23:44,932 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 1474b9bab6798f04753b892b73c9626e:
2024-12-13T17:23:44,933 INFO [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1734110624549.1474b9bab6798f04753b892b73c9626e., pid=6, masterSystemTime=1734110624911
2024-12-13T17:23:44,934 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1734110624549.1474b9bab6798f04753b892b73c9626e.
2024-12-13T17:23:44,934 INFO [RS_OPEN_PRIORITY_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1734110624549.1474b9bab6798f04753b892b73c9626e.
2024-12-13T17:23:44,935 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=1474b9bab6798f04753b892b73c9626e, regionState=OPEN, openSeqNum=2, regionLocation=bcc0a2319104,41323,1734110623538
2024-12-13T17:23:44,938 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5
2024-12-13T17:23:44,938 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 1474b9bab6798f04753b892b73c9626e, server=bcc0a2319104,41323,1734110623538 in 181 msec
2024-12-13T17:23:44,939 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4
2024-12-13T17:23:44,939 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=1474b9bab6798f04753b892b73c9626e, ASSIGN in 340 msec
2024-12-13T17:23:44,940 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-13T17:23:44,940 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734110624940"}]},"ts":"1734110624940"}
2024-12-13T17:23:44,941 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta
2024-12-13T17:23:44,948 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION
2024-12-13T17:23:44,950 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 400 msec
2024-12-13T17:23:44,951 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace
2024-12-13T17:23:44,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace
2024-12-13T17:23:44,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41323-0x100206b52390001, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:23:44,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:23:44,959 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default
2024-12-13T17:23:44,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace
2024-12-13T17:23:44,983 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 23 msec
2024-12-13T17:23:44,992 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase
2024-12-13T17:23:45,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace
2024-12-13T17:23:45,016 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 24 msec
2024-12-13T17:23:45,039 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34581/user/jenkins/test-data/d4e2f07e-6f8e-fad8-1eea-39f7bfbc4cf4/MasterData/WALs/bcc0a2319104,41247,1734110347301/bcc0a2319104%2C41247%2C1734110347301.1734110347568
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor119.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-13T17:23:45,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default
2024-12-13T17:23:45,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase
2024-12-13T17:23:45,056 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 1.475sec
2024-12-13T17:23:45,056 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-12-13T17:23:45,056 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-12-13T17:23:45,056 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-12-13T17:23:45,056 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-12-13T17:23:45,056 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-12-13T17:23:45,056 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,41755,1734110623174-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-13T17:23:45,056 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,41755,1734110623174-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-12-13T17:23:45,058 DEBUG [master/bcc0a2319104:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds
2024-12-13T17:23:45,058 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-12-13T17:23:45,058 INFO [master/bcc0a2319104:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bcc0a2319104,41755,1734110623174-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-13T17:23:45,062 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x741e66f5 to 127.0.0.1:60059 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@a721abe
2024-12-13T17:23:45,074 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a094020, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-13T17:23:45,076 DEBUG [hconnection-0x4daf3064-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-13T17:23:45,078 INFO [RS-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40690, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-13T17:23:45,080 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=bcc0a2319104,41755,1734110623174
2024-12-13T17:23:45,080 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-13T17:23:45,083 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false
2024-12-13T17:23:45,083 INFO [Time-limited test {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-13T17:23:45,086 INFO [Time-limited test {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/WALs/test.com,8080,1, archiveDir=hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/oldWALs, maxLogs=32
2024-12-13T17:23:45,087 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1734110625087
2024-12-13T17:23:45,092 INFO [Time-limited test {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/WALs/test.com,8080,1/test.com%2C8080%2C1.1734110625087
2024-12-13T17:23:45,092 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37595:37595),(127.0.0.1/127.0.0.1:43781:43781)]
2024-12-13T17:23:45,092 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1734110625092
2024-12-13T17:23:45,097 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/WALs/test.com,8080,1/test.com%2C8080%2C1.1734110625087 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/WALs/test.com,8080,1/test.com%2C8080%2C1.1734110625092
2024-12-13T17:23:45,097 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43781:43781),(127.0.0.1/127.0.0.1:37595:37595)]
2024-12-13T17:23:45,097 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/WALs/test.com,8080,1/test.com%2C8080%2C1.1734110625087 is not closed yet, will try archiving it next time
2024-12-13T17:23:45,098 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/WALs/test.com,8080,1
2024-12-13T17:23:45,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37433 is added to blk_1073741837_1013 (size=93)
2024-12-13T17:23:45,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41565 is added to blk_1073741838_1014 (size=93)
2024-12-13T17:23:45,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41565 is added to blk_1073741837_1013 (size=93)
2024-12-13T17:23:45,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37433 is added to blk_1073741838_1014 (size=93)
2024-12-13T17:23:45,099 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/WALs/test.com,8080,1/test.com%2C8080%2C1.1734110625087 to hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/oldWALs/test.com%2C8080%2C1.1734110625087
2024-12-13T17:23:45,101 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/oldWALs
2024-12-13T17:23:45,101 INFO [Time-limited test {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1734110625092)
2024-12-13T17:23:45,101 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster
2024-12-13T17:23:45,101 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x741e66f5 to 127.0.0.1:60059
2024-12-13T17:23:45,101 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-13T17:23:45,101 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-12-13T17:23:45,101 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=62349058, stopped=false
2024-12-13T17:23:45,101 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=bcc0a2319104,41755,1734110623174
2024-12-13T17:23:45,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41323-0x100206b52390001, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-13T17:23:45,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-13T17:23:45,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41323-0x100206b52390001, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:23:45,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:23:45,114 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping
2024-12-13T17:23:45,115 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-13T17:23:45,115 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41323-0x100206b52390001, quorum=127.0.0.1:60059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-13T17:23:45,115 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'bcc0a2319104,41323,1734110623538' *****
2024-12-13T17:23:45,115 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested
2024-12-13T17:23:45,116 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-13T17:23:45,116 INFO [RS:0;bcc0a2319104:41323 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-13T17:23:45,116 INFO [RS:0;bcc0a2319104:41323 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-13T17:23:45,116 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting
2024-12-13T17:23:45,116 INFO [RS:0;bcc0a2319104:41323 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-13T17:23:45,116 INFO [RS:0;bcc0a2319104:41323 {}] regionserver.HRegionServer(3579): Received CLOSE for 1474b9bab6798f04753b892b73c9626e
2024-12-13T17:23:45,117 INFO [RS:0;bcc0a2319104:41323 {}] regionserver.HRegionServer(1224): stopping server bcc0a2319104,41323,1734110623538
2024-12-13T17:23:45,117 DEBUG [RS:0;bcc0a2319104:41323 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-13T17:23:45,117 INFO [RS:0;bcc0a2319104:41323 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-13T17:23:45,117 INFO [RS:0;bcc0a2319104:41323 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-13T17:23:45,117 INFO [RS:0;bcc0a2319104:41323 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-13T17:23:45,117 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 1474b9bab6798f04753b892b73c9626e, disabling compactions & flushes
2024-12-13T17:23:45,117 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734110624549.1474b9bab6798f04753b892b73c9626e.
2024-12-13T17:23:45,117 INFO [RS:0;bcc0a2319104:41323 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740
2024-12-13T17:23:45,117 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734110624549.1474b9bab6798f04753b892b73c9626e.
2024-12-13T17:23:45,117 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734110624549.1474b9bab6798f04753b892b73c9626e. after waiting 0 ms
2024-12-13T17:23:45,117 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734110624549.1474b9bab6798f04753b892b73c9626e.
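[Editor's note] The records above cover the whole lifecycle the test exercises: a minicluster comes up, an FSHLog-backed WAL is created with blocksize=256 MB, rollsize=128 MB and maxLogs=32, the empty WAL is rolled (entries=0, so the old file goes straight to oldWALs), and shutdown begins. A minimal sketch, not the actual TestLogRolling code, of how a test could reproduce this setup; the config keys are standard HBase keys but should be verified against the 2.7.0-SNAPSHOT build this log comes from:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    Configuration conf = util.getConfiguration();
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // blocksize=256 MB
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // rollsize = 0.5 * blocksize = 128 MB
    conf.setInt("hbase.regionserver.maxlogs", 32);                         // maxLogs=32
    conf.setInt("zookeeper.session.timeout", 90000);                       // session timeout=90000ms, as logged
    util.startMiniCluster(1); // one master + one region server, matching the log
    try {
      // ... create a WAL, write nothing, and roll it, as testLogRollOnNothingWritten does ...
    } finally {
      util.shutdownMiniCluster(); // produces the shutdown sequence seen below
    }
  }
}
```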
2024-12-13T17:23:45,117 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 1474b9bab6798f04753b892b73c9626e 1/1 column families, dataSize=78 B heapSize=488 B
2024-12-13T17:23:45,117 INFO [RS:0;bcc0a2319104:41323 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close
2024-12-13T17:23:45,117 DEBUG [RS:0;bcc0a2319104:41323 {}] regionserver.HRegionServer(1603): Online Regions={1474b9bab6798f04753b892b73c9626e=hbase:namespace,,1734110624549.1474b9bab6798f04753b892b73c9626e., 1588230740=hbase:meta,,1.1588230740}
2024-12-13T17:23:45,117 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-13T17:23:45,117 DEBUG [RS:0;bcc0a2319104:41323 {}] regionserver.HRegionServer(1629): Waiting on 1474b9bab6798f04753b892b73c9626e, 1588230740
2024-12-13T17:23:45,117 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-13T17:23:45,117 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-13T17:23:45,117 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-13T17:23:45,117 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-13T17:23:45,118 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=1.23 KB heapSize=2.87 KB
2024-12-13T17:23:45,132 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/data/hbase/meta/1588230740/.tmp/info/88a6c3b70bd54cd5868515d90958fe46 is 143, key is hbase:namespace,,1734110624549.1474b9bab6798f04753b892b73c9626e./info:regioninfo/1734110624935/Put/seqid=0
2024-12-13T17:23:45,132 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/data/hbase/namespace/1474b9bab6798f04753b892b73c9626e/.tmp/info/ff66a491f1ef4493b22af22c8bca098f is 45, key is default/info:d/1734110624963/Put/seqid=0
2024-12-13T17:23:45,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41565 is added to blk_1073741839_1015 (size=6595)
2024-12-13T17:23:45,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37433 is added to blk_1073741840_1016 (size=5037)
2024-12-13T17:23:45,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37433 is added to blk_1073741839_1015 (size=6595)
2024-12-13T17:23:45,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41565 is added to blk_1073741840_1016 (size=5037)
2024-12-13T17:23:45,137 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.14 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/data/hbase/meta/1588230740/.tmp/info/88a6c3b70bd54cd5868515d90958fe46
2024-12-13T17:23:45,137 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/data/hbase/namespace/1474b9bab6798f04753b892b73c9626e/.tmp/info/ff66a491f1ef4493b22af22c8bca098f
2024-12-13T17:23:45,142 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/data/hbase/namespace/1474b9bab6798f04753b892b73c9626e/.tmp/info/ff66a491f1ef4493b22af22c8bca098f as hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/data/hbase/namespace/1474b9bab6798f04753b892b73c9626e/info/ff66a491f1ef4493b22af22c8bca098f
2024-12-13T17:23:45,146 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/data/hbase/namespace/1474b9bab6798f04753b892b73c9626e/info/ff66a491f1ef4493b22af22c8bca098f, entries=2, sequenceid=6, filesize=4.9 K
2024-12-13T17:23:45,147 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 1474b9bab6798f04753b892b73c9626e in 29ms, sequenceid=6, compaction requested=false
2024-12-13T17:23:45,147 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace'
2024-12-13T17:23:45,150 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/data/hbase/namespace/1474b9bab6798f04753b892b73c9626e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-13T17:23:45,150 INFO [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1734110624549.1474b9bab6798f04753b892b73c9626e.
2024-12-13T17:23:45,151 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 1474b9bab6798f04753b892b73c9626e:
2024-12-13T17:23:45,151 DEBUG [RS_CLOSE_REGION-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1734110624549.1474b9bab6798f04753b892b73c9626e.
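[Editor's note] The flush above shows HBase's two-step flush commit: the new HFile is first written under the region's .tmp directory ("Flushed memstore ... to=.../.tmp/info/...") and only then moved into the column family directory ("Committing ... as .../info/..."), so readers never see a partially written file. The close path flushes automatically; a sketch of triggering the same kind of flush through the public Admin API (Admin#flush is a real HBase 2.x call; connection details are illustrative):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Writes the memstore to a .tmp HFile, then commits it into the store.
      admin.flush(TableName.valueOf("hbase:namespace"));
    }
  }
}
```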
2024-12-13T17:23:45,155 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/data/hbase/meta/1588230740/.tmp/table/5edfd3ea01cc4d04a44d4f5d38763ce1 is 51, key is hbase:namespace/table:state/1734110624940/Put/seqid=0
2024-12-13T17:23:45,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37433 is added to blk_1073741841_1017 (size=5242)
2024-12-13T17:23:45,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41565 is added to blk_1073741841_1017 (size=5242)
2024-12-13T17:23:45,160 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=94 B at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/data/hbase/meta/1588230740/.tmp/table/5edfd3ea01cc4d04a44d4f5d38763ce1
2024-12-13T17:23:45,164 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/data/hbase/meta/1588230740/.tmp/info/88a6c3b70bd54cd5868515d90958fe46 as hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/data/hbase/meta/1588230740/info/88a6c3b70bd54cd5868515d90958fe46
2024-12-13T17:23:45,168 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/data/hbase/meta/1588230740/info/88a6c3b70bd54cd5868515d90958fe46, entries=10, sequenceid=9, filesize=6.4 K
2024-12-13T17:23:45,168 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/data/hbase/meta/1588230740/.tmp/table/5edfd3ea01cc4d04a44d4f5d38763ce1 as hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/data/hbase/meta/1588230740/table/5edfd3ea01cc4d04a44d4f5d38763ce1
2024-12-13T17:23:45,173 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/data/hbase/meta/1588230740/table/5edfd3ea01cc4d04a44d4f5d38763ce1, entries=2, sequenceid=9, filesize=5.1 K
2024-12-13T17:23:45,173 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~1.23 KB/1264, heapSize ~2.59 KB/2648, currentSize=0 B/0 for 1588230740 in 56ms, sequenceid=9, compaction requested=false
2024-12-13T17:23:45,173 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-12-13T17:23:45,177 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/data/hbase/meta/1588230740/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=1
2024-12-13T17:23:45,178 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-13T17:23:45,178 INFO [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-13T17:23:45,178 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-13T17:23:45,178 DEBUG [RS_CLOSE_META-regionserver/bcc0a2319104:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-13T17:23:45,317 INFO [RS:0;bcc0a2319104:41323 {}] regionserver.HRegionServer(1250): stopping server bcc0a2319104,41323,1734110623538; all regions closed.
2024-12-13T17:23:45,318 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/WALs/bcc0a2319104,41323,1734110623538
2024-12-13T17:23:45,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37433 is added to blk_1073741834_1010 (size=2484)
2024-12-13T17:23:45,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41565 is added to blk_1073741834_1010 (size=2484)
2024-12-13T17:23:45,323 DEBUG [RS:0;bcc0a2319104:41323 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/oldWALs
2024-12-13T17:23:45,323 INFO [RS:0;bcc0a2319104:41323 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog bcc0a2319104%2C41323%2C1734110623538.meta:.meta(num 1734110624470)
2024-12-13T17:23:45,324 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/WALs/bcc0a2319104,41323,1734110623538
2024-12-13T17:23:45,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37433 is added to blk_1073741833_1009 (size=1414)
2024-12-13T17:23:45,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41565 is added to blk_1073741833_1009 (size=1414)
2024-12-13T17:23:45,329 DEBUG [RS:0;bcc0a2319104:41323 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/oldWALs
2024-12-13T17:23:45,329 INFO [RS:0;bcc0a2319104:41323 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog bcc0a2319104%2C41323%2C1734110623538:(num 1734110624053)
2024-12-13T17:23:45,329 DEBUG [RS:0;bcc0a2319104:41323 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-13T17:23:45,329 INFO [RS:0;bcc0a2319104:41323 {}] regionserver.LeaseManager(133): Closed leases
2024-12-13T17:23:45,329 INFO [RS:0;bcc0a2319104:41323 {}] hbase.ChoreService(370): Chore service for: regionserver/bcc0a2319104:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown
2024-12-13T17:23:45,330 INFO [regionserver/bcc0a2319104:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
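[Editor's note] Both region server WALs above (the .meta WAL and the default WAL) are moved to the shared oldWALs directory once closed. A sketch, assuming the HBase 2.x WALFactory / WAL.Reader API, of how such an archived WAL file can be inspected after the fact; the path argument is a placeholder, not one of the actual files from this run:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;

public class WalReadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Path archived = new Path(args[0]); // e.g. an hdfs: path under .../oldWALs
    FileSystem fs = archived.getFileSystem(conf);
    try (WAL.Reader reader = WALFactory.createReader(fs, archived, conf)) {
      WAL.Entry entry;
      while ((entry = reader.next()) != null) { // one entry per appended edit
        System.out.println(entry.getKey() + " edits=" + entry.getEdit().size());
      }
    }
  }
}
```

For the rolled-but-empty WAL in this test (entries=0, filesize=85 B), the loop would terminate immediately: the 85 bytes are file metadata, not edits.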
2024-12-13T17:23:45,330 INFO [RS:0;bcc0a2319104:41323 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:41323
2024-12-13T17:23:45,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41323-0x100206b52390001, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/bcc0a2319104,41323,1734110623538
2024-12-13T17:23:45,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-13T17:23:45,348 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [bcc0a2319104,41323,1734110623538]
2024-12-13T17:23:45,348 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing bcc0a2319104,41323,1734110623538; numProcessing=1
2024-12-13T17:23:45,356 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/bcc0a2319104,41323,1734110623538 already deleted, retry=false
2024-12-13T17:23:45,356 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; bcc0a2319104,41323,1734110623538 expired; onlineServers=0
2024-12-13T17:23:45,356 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'bcc0a2319104,41755,1734110623174' *****
2024-12-13T17:23:45,356 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-13T17:23:45,356 DEBUG [M:0;bcc0a2319104:41755 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@223de1bf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bcc0a2319104/172.17.0.2:0
2024-12-13T17:23:45,356 INFO [M:0;bcc0a2319104:41755 {}] regionserver.HRegionServer(1224): stopping server bcc0a2319104,41755,1734110623174
2024-12-13T17:23:45,356 INFO [M:0;bcc0a2319104:41755 {}] regionserver.HRegionServer(1250): stopping server bcc0a2319104,41755,1734110623174; all regions closed.
2024-12-13T17:23:45,356 DEBUG [M:0;bcc0a2319104:41755 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-13T17:23:45,356 DEBUG [M:0;bcc0a2319104:41755 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-13T17:23:45,357 DEBUG [M:0;bcc0a2319104:41755 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-13T17:23:45,357 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
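[Editor's note] The "RegionServer ephemeral node deleted, processing expiration" record works because each region server registers itself under /hbase/rs as an ephemeral znode: when the server's ZooKeeper session ends, ZooKeeper deletes the node and the master's RegionServerTracker treats the deletion as server death. A sketch of that registration pattern using the plain Apache ZooKeeper client (HBase's own ZKWatcher/ZKUtil wrappers are internal); quorum and znode path mirror the log, the empty payload is illustrative:

```java
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:60059", 90000, event -> { });
    // An ephemeral znode lives only as long as this client's session.
    zk.create("/hbase/rs/bcc0a2319104,41323,1734110623538", new byte[0],
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    zk.close(); // session ends -> ZooKeeper deletes the node -> watchers see NodeDeleted
  }
}
```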
2024-12-13T17:23:45,357 DEBUG [master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.large.0-1734110623819 {}] cleaner.HFileCleaner(306): Exit Thread[master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.large.0-1734110623819,5,FailOnTimeoutGroup]
2024-12-13T17:23:45,357 DEBUG [master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.small.0-1734110623819 {}] cleaner.HFileCleaner(306): Exit Thread[master/bcc0a2319104:0:becomeActiveMaster-HFileCleaner.small.0-1734110623819,5,FailOnTimeoutGroup]
2024-12-13T17:23:45,357 INFO [M:0;bcc0a2319104:41755 {}] hbase.ChoreService(370): Chore service for: master/bcc0a2319104:0 had [] on shutdown
2024-12-13T17:23:45,357 DEBUG [M:0;bcc0a2319104:41755 {}] master.HMaster(1733): Stopping service threads
2024-12-13T17:23:45,357 INFO [M:0;bcc0a2319104:41755 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-13T17:23:45,357 INFO [M:0;bcc0a2319104:41755 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-13T17:23:45,357 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-13T17:23:45,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-13T17:23:45,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-13T17:23:45,365 DEBUG [M:0;bcc0a2319104:41755 {}] zookeeper.ZKUtil(347): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-13T17:23:45,365 WARN [M:0;bcc0a2319104:41755 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-13T17:23:45,365 INFO [M:0;bcc0a2319104:41755 {}] assignment.AssignmentManager(391): Stopping assignment manager
2024-12-13T17:23:45,365 INFO [M:0;bcc0a2319104:41755 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-13T17:23:45,365 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-13T17:23:45,365 DEBUG [M:0;bcc0a2319104:41755 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-13T17:23:45,365 INFO [M:0;bcc0a2319104:41755 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-13T17:23:45,365 DEBUG [M:0;bcc0a2319104:41755 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-13T17:23:45,365 DEBUG [M:0;bcc0a2319104:41755 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-13T17:23:45,365 DEBUG [M:0;bcc0a2319104:41755 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
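[Editor's note] The "Set watcher on znode that does not yet exist, /hbase/master" record relies on ZooKeeper's exists() call, which registers a watch even when the node is absent, so the client is notified if the znode is later created (or, during this shutdown, when /hbase/master is deleted). A sketch of that pattern with the plain ZooKeeper client; quorum and path values are taken from the log:

```java
import org.apache.zookeeper.ZooKeeper;

public class WatchSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:60059", 90000,
        event -> System.out.println("event: " + event.getType() + " path=" + event.getPath()));
    // exists() returns null for an absent znode but still registers the watch,
    // so a later NodeCreated/NodeDeleted on /hbase/master wakes this client.
    zk.exists("/hbase/master", true);
    Thread.sleep(5_000); // keep the session alive long enough to observe events
    zk.close();
  }
}
```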
2024-12-13T17:23:45,365 INFO [M:0;bcc0a2319104:41755 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=25.32 KB heapSize=32.31 KB
2024-12-13T17:23:45,385 DEBUG [M:0;bcc0a2319104:41755 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/669b699cbd414f468508dfbb41bfad08 is 82, key is hbase:meta,,1/info:regioninfo/1734110624488/Put/seqid=0
2024-12-13T17:23:45,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41565 is added to blk_1073741842_1018 (size=5672)
2024-12-13T17:23:45,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37433 is added to blk_1073741842_1018 (size=5672)
2024-12-13T17:23:45,390 INFO [M:0;bcc0a2319104:41755 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/669b699cbd414f468508dfbb41bfad08
2024-12-13T17:23:45,414 DEBUG [M:0;bcc0a2319104:41755 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1e4edc82d22540129fa18da361c34280 is 696, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1734110624949/Put/seqid=0
2024-12-13T17:23:45,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41565 is added to blk_1073741843_1019 (size=6626)
2024-12-13T17:23:45,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37433 is added to blk_1073741843_1019 (size=6626)
2024-12-13T17:23:45,419 INFO [M:0;bcc0a2319104:41755 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.72 KB at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1e4edc82d22540129fa18da361c34280
2024-12-13T17:23:45,435 DEBUG [M:0;bcc0a2319104:41755 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1373cdb57ec9457eb4729794bed2a90e is 69, key is bcc0a2319104,41323,1734110623538/rs:state/1734110623895/Put/seqid=0
2024-12-13T17:23:45,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37433 is added to blk_1073741844_1020 (size=5156)
2024-12-13T17:23:45,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41565 is added to blk_1073741844_1020 (size=5156)
2024-12-13T17:23:45,440 INFO [M:0;bcc0a2319104:41755 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1373cdb57ec9457eb4729794bed2a90e
2024-12-13T17:23:45,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41323-0x100206b52390001, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-13T17:23:45,448 INFO [RS:0;bcc0a2319104:41323 {}] regionserver.HRegionServer(1307): Exiting; stopping=bcc0a2319104,41323,1734110623538; zookeeper connection closed.
2024-12-13T17:23:45,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41323-0x100206b52390001, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-13T17:23:45,448 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5746f3ea {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5746f3ea
2024-12-13T17:23:45,448 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-13T17:23:45,457 DEBUG [M:0;bcc0a2319104:41755 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ff9df20a834c44aeac96dd453bde61a6 is 52, key is load_balancer_on/state:d/1734110625082/Put/seqid=0
2024-12-13T17:23:45,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41565 is added to blk_1073741845_1021 (size=5056)
2024-12-13T17:23:45,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37433 is added to blk_1073741845_1021 (size=5056)
2024-12-13T17:23:45,462 INFO [M:0;bcc0a2319104:41755 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ff9df20a834c44aeac96dd453bde61a6
2024-12-13T17:23:45,466 DEBUG [M:0;bcc0a2319104:41755 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/669b699cbd414f468508dfbb41bfad08 as hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/669b699cbd414f468508dfbb41bfad08
2024-12-13T17:23:45,470 INFO [M:0;bcc0a2319104:41755 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/669b699cbd414f468508dfbb41bfad08, entries=8, sequenceid=70, filesize=5.5 K
2024-12-13T17:23:45,471 DEBUG [M:0;bcc0a2319104:41755 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1e4edc82d22540129fa18da361c34280 as hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1e4edc82d22540129fa18da361c34280
2024-12-13T17:23:45,476 INFO [M:0;bcc0a2319104:41755 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1e4edc82d22540129fa18da361c34280, entries=8, sequenceid=70, filesize=6.5 K
2024-12-13T17:23:45,477 DEBUG [M:0;bcc0a2319104:41755 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1373cdb57ec9457eb4729794bed2a90e as hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1373cdb57ec9457eb4729794bed2a90e
2024-12-13T17:23:45,481 INFO [M:0;bcc0a2319104:41755 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1373cdb57ec9457eb4729794bed2a90e, entries=1, sequenceid=70, filesize=5.0 K
2024-12-13T17:23:45,482 DEBUG [M:0;bcc0a2319104:41755 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ff9df20a834c44aeac96dd453bde61a6 as hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ff9df20a834c44aeac96dd453bde61a6
2024-12-13T17:23:45,487 INFO [M:0;bcc0a2319104:41755 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34849/user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ff9df20a834c44aeac96dd453bde61a6, entries=1, sequenceid=70, filesize=4.9 K
2024-12-13T17:23:45,488 INFO [M:0;bcc0a2319104:41755 {}] regionserver.HRegion(3040): Finished flush of dataSize ~25.32 KB/25929, heapSize ~32.25 KB/33024, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 122ms, sequenceid=70, compaction requested=false
2024-12-13T17:23:45,490 INFO [M:0;bcc0a2319104:41755 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-13T17:23:45,490 DEBUG [M:0;bcc0a2319104:41755 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-13T17:23:45,490 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/8a6cb24a-e734-dd78-9639-91dfb2e6f160/MasterData/WALs/bcc0a2319104,41755,1734110623174
2024-12-13T17:23:45,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41565 is added to blk_1073741830_1006 (size=31030)
2024-12-13T17:23:45,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37433 is added to blk_1073741830_1006 (size=31030)
2024-12-13T17:23:45,492 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-13T17:23:45,492 INFO [M:0;bcc0a2319104:41755 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-12-13T17:23:45,492 INFO [M:0;bcc0a2319104:41755 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:41755
2024-12-13T17:23:45,498 DEBUG [M:0;bcc0a2319104:41755 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/bcc0a2319104,41755,1734110623174 already deleted, retry=false
2024-12-13T17:23:45,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-13T17:23:45,606 INFO [M:0;bcc0a2319104:41755 {}] regionserver.HRegionServer(1307): Exiting; stopping=bcc0a2319104,41755,1734110623174; zookeeper connection closed.
2024-12-13T17:23:45,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41755-0x100206b52390000, quorum=127.0.0.1:60059, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-13T17:23:45,609 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@35f3950c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-13T17:23:45,609 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7fa677e5{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-13T17:23:45,609 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-13T17:23:45,610 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56c8d5d7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-13T17:23:45,610 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7b0efac0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/hadoop.log.dir/,STOPPED}
2024-12-13T17:23:45,611 WARN [BP-1824788747-172.17.0.2-1734110621754 heartbeating to localhost/127.0.0.1:34849 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-13T17:23:45,611 WARN [BP-1824788747-172.17.0.2-1734110621754 heartbeating to localhost/127.0.0.1:34849 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1824788747-172.17.0.2-1734110621754 (Datanode Uuid 0932d7b0-18b1-4a78-be83-f53f812d705b) service to localhost/127.0.0.1:34849
2024-12-13T17:23:45,611 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-13T17:23:45,611 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-13T17:23:45,612 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/cluster_2c19175c-c403-f264-ec98-b88bf95ebd3e/dfs/data/data3/current/BP-1824788747-172.17.0.2-1734110621754 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T17:23:45,612 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/cluster_2c19175c-c403-f264-ec98-b88bf95ebd3e/dfs/data/data4/current/BP-1824788747-172.17.0.2-1734110621754 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T17:23:45,612 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-13T17:23:45,615 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@75209dae{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-13T17:23:45,615 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@54663214{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-13T17:23:45,615 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-13T17:23:45,616 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@495b4ebc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-13T17:23:45,616 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@580042fd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/hadoop.log.dir/,STOPPED}
2024-12-13T17:23:45,617 WARN [BP-1824788747-172.17.0.2-1734110621754 heartbeating to localhost/127.0.0.1:34849 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-13T17:23:45,617 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-13T17:23:45,617 WARN [BP-1824788747-172.17.0.2-1734110621754 heartbeating to localhost/127.0.0.1:34849 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1824788747-172.17.0.2-1734110621754 (Datanode Uuid 5a29cb8b-501c-4da3-9998-8f597639f0bf) service to localhost/127.0.0.1:34849
2024-12-13T17:23:45,617 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-13T17:23:45,617 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/cluster_2c19175c-c403-f264-ec98-b88bf95ebd3e/dfs/data/data1/current/BP-1824788747-172.17.0.2-1734110621754 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T17:23:45,618 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/cluster_2c19175c-c403-f264-ec98-b88bf95ebd3e/dfs/data/data2/current/BP-1824788747-172.17.0.2-1734110621754 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-13T17:23:45,618 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-13T17:23:45,623 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3c01f689{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-13T17:23:45,623 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@51adf726{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-13T17:23:45,623 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-13T17:23:45,623 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@438448c8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-13T17:23:45,624 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a7be2a7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/32b615d6-1836-5f91-90e3-28f7cc7e08ce/hadoop.log.dir/,STOPPED}
2024-12-13T17:23:45,629 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-12-13T17:23:45,642 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
2024-12-13T17:23:45,648 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=155 (was 134) - Thread LEAK? -, OpenFileDescriptor=519 (was 487) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=125 (was 118) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3041 (was 3060)
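[Editor's note] The final ResourceChecker record compares thread count, open file descriptors, and system load before and after the test and flags increases as possible leaks (here 155 threads after versus 134 before). A sketch of the same before/after accounting using only standard java.lang.management beans, not HBase's internal test helper:

```java
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;

public class ResourceCheckSketch {
  public static void main(String[] args) {
    ThreadMXBean threads = ManagementFactory.getThreadMXBean();
    int before = threads.getThreadCount();
    // ... run the test body here ...
    int after = threads.getThreadCount();
    if (after > before) {
      // Mirrors the "Thread=155 (was 134) - Thread LEAK? -" format above.
      System.out.printf("Thread=%d (was %d) - Thread LEAK? -%n", after, before);
    }
  }
}
```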