2024-12-03 13:05:55,399 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@55de24cc
2024-12-03 13:05:55,417 main DEBUG Took 0.013433 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-12-03 13:05:55,419 main DEBUG PluginManager 'Core' found 129 plugins
2024-12-03 13:05:55,419 main DEBUG PluginManager 'Level' found 0 plugins
2024-12-03 13:05:55,423 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-12-03 13:05:55,425 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 13:05:55,435 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-12-03 13:05:55,454 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 13:05:55,456 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 13:05:55,457 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 13:05:55,458 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 13:05:55,458 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 13:05:55,459 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 13:05:55,460 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 13:05:55,460 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 13:05:55,461 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 13:05:55,461 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 13:05:55,463 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 13:05:55,463 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 13:05:55,464 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 13:05:55,464 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 13:05:55,465 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 13:05:55,465 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 13:05:55,466 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 13:05:55,467 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 13:05:55,467 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 13:05:55,468 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 13:05:55,469 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 13:05:55,469 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 13:05:55,470 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 13:05:55,470 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 13:05:55,471 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 13:05:55,471 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-12-03 13:05:55,473 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 13:05:55,475 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-12-03 13:05:55,477 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-12-03 13:05:55,478 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-12-03 13:05:55,480 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-12-03 13:05:55,481 main DEBUG PluginManager 'Converter' found 47 plugins
2024-12-03 13:05:55,493 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-12-03 13:05:55,497 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-12-03 13:05:55,499 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-12-03 13:05:55,500 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-12-03 13:05:55,500 main DEBUG createAppenders(={Console})
2024-12-03 13:05:55,501 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@55de24cc initialized
2024-12-03 13:05:55,502 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@55de24cc
2024-12-03 13:05:55,502 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@55de24cc OK.
2024-12-03 13:05:55,503 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-12-03 13:05:55,503 main DEBUG OutputStream closed
2024-12-03 13:05:55,504 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-12-03 13:05:55,504 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-12-03 13:05:55,504 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@53ce1329 OK
2024-12-03 13:05:55,626 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-12-03 13:05:55,629 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-12-03 13:05:55,631 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-12-03 13:05:55,632 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-12-03 13:05:55,633 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-12-03 13:05:55,634 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-12-03 13:05:55,634 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-12-03 13:05:55,634 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-12-03 13:05:55,635 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-12-03 13:05:55,635 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-12-03 13:05:55,635 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-12-03 13:05:55,636 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-12-03 13:05:55,636 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-12-03 13:05:55,637 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-12-03 13:05:55,637 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-12-03 13:05:55,638 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-12-03 13:05:55,639 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-12-03 13:05:55,640 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-12-03 13:05:55,643 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-03 13:05:55,643 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null
2024-12-03 13:05:55,644 main DEBUG Shutdown hook enabled. Registering a new one.
2024-12-03 13:05:55,645 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK.
2024-12-03T13:05:55,997 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc
2024-12-03 13:05:56,001 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-12-03 13:05:56,001 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
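The status output above is the test jar's log4j2.properties being parsed into a PropertiesConfiguration: per-package LoggerConfigs, a PatternLayout, and a single "Console" appender (HBase's HBaseTestAppender writing to SYSTEM_ERR). As a rough illustration only, here is a minimal programmatic sketch of an equivalent configuration using Log4j 2's ConfigurationBuilder API; it substitutes a stock console appender for HBaseTestAppender, and the logger list is abbreviated.

```java
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;
import org.apache.logging.log4j.core.config.builder.api.AppenderComponentBuilder;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilder;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory;
import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;

public class TestLoggingConfigSketch {
  public static void main(String[] args) {
    ConfigurationBuilder<BuiltConfiguration> builder =
        ConfigurationBuilderFactory.newConfigurationBuilder();
    // Stand-in for HBaseTestAppender: a plain console appender on stderr,
    // using the same pattern the PatternLayout$Builder line shows.
    AppenderComponentBuilder console = builder
        .newAppender("Console", "Console")
        .addAttribute("target", "SYSTEM_ERR");
    console.add(builder.newLayout("PatternLayout")
        .addAttribute("pattern",
            "%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n"));
    builder.add(console);
    // A few of the per-package levels seen in the LoggerConfig$Builder lines.
    builder.add(builder.newLogger("org.apache.hadoop", Level.WARN));
    builder.add(builder.newLogger("org.apache.hadoop.hbase", Level.DEBUG));
    builder.add(builder.newLogger("org.apache.zookeeper", Level.ERROR));
    // Root logger at INFO routed to the console appender, matching
    // levelAndRefs="INFO,Console" above.
    builder.add(builder.newRootLogger(Level.INFO)
        .add(builder.newAppenderRef("Console")));
    Configurator.initialize(builder.build());
  }
}
```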
2024-12-03T13:05:56,014 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins
2024-12-03T13:05:56,125 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=286, MaxFileDescriptor=1048576, SystemLoadAverage=1461, ProcessCount=11, AvailableMemoryMB=2576
2024-12-03T13:05:56,130 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-03T13:05:56,135 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/cluster_e02847aa-a639-dbe2-f596-1121a5b8dbf4, deleteOnExit=true
2024-12-03T13:05:56,135 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS
2024-12-03T13:05:56,136 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/test.cache.data in system properties and HBase conf
2024-12-03T13:05:56,137 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/hadoop.tmp.dir in system properties and HBase conf
2024-12-03T13:05:56,138 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/hadoop.log.dir in system properties and HBase conf
2024-12-03T13:05:56,139 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-03T13:05:56,140 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-03T13:05:56,140 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF
2024-12-03T13:05:56,308 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-12-03T13:05:56,453 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-03T13:05:56,458 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-03T13:05:56,459 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-03T13:05:56,460 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-03T13:05:56,461 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-03T13:05:56,462 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-03T13:05:56,462 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-03T13:05:56,463 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-03T13:05:56,464 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-03T13:05:56,464 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-03T13:05:56,465 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/nfs.dump.dir in system properties and HBase conf
2024-12-03T13:05:56,466 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/java.io.tmpdir in system properties and HBase conf
2024-12-03T13:05:56,466 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-03T13:05:56,467 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-03T13:05:56,468 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-03T13:05:57,273 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-03T13:05:58,039 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-12-03T13:05:58,158 INFO [Time-limited test {}] log.Log(170): Logging initialized @3922ms to org.eclipse.jetty.util.log.Slf4jLog
2024-12-03T13:05:58,269 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-03T13:05:58,359 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-03T13:05:58,409 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-03T13:05:58,409 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-03T13:05:58,411 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-03T13:05:58,435 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-03T13:05:58,439 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24b482a7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/hadoop.log.dir/,AVAILABLE}
2024-12-03T13:05:58,440 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75cb83b2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-03T13:05:58,747 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c97b431{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/java.io.tmpdir/jetty-localhost-42075-hadoop-hdfs-3_4_1-tests_jar-_-any-14962778931316940534/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-03T13:05:58,762 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5718b892{HTTP/1.1, (http/1.1)}{localhost:42075}
2024-12-03T13:05:58,762 INFO [Time-limited test {}] server.Server(415): Started @4527ms
2024-12-03T13:05:58,799 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-03T13:05:59,413 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-03T13:05:59,419 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-03T13:05:59,421 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-03T13:05:59,421 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-03T13:05:59,421 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-03T13:05:59,422 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6659e51{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/hadoop.log.dir/,AVAILABLE}
2024-12-03T13:05:59,422 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@77047f54{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-03T13:05:59,546 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5bf4a5dd{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/java.io.tmpdir/jetty-localhost-43351-hadoop-hdfs-3_4_1-tests_jar-_-any-18042224643072062559/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T13:05:59,547 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@38b7daa8{HTTP/1.1, (http/1.1)}{localhost:43351}
2024-12-03T13:05:59,548 INFO [Time-limited test {}] server.Server(415): Started @5312ms
2024-12-03T13:05:59,598 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-03T13:05:59,720 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-03T13:05:59,733 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-03T13:05:59,749 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-03T13:05:59,749 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-03T13:05:59,750 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-03T13:05:59,751 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@27e71ed3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/hadoop.log.dir/,AVAILABLE}
2024-12-03T13:05:59,752 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5303a55e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-03T13:05:59,903 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3e210958{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/java.io.tmpdir/jetty-localhost-37477-hadoop-hdfs-3_4_1-tests_jar-_-any-17207921676321053217/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T13:05:59,904 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c5298b9{HTTP/1.1, (http/1.1)}{localhost:37477}
2024-12-03T13:05:59,906 INFO [Time-limited test {}] server.Server(415): Started @5671ms
2024-12-03T13:05:59,909 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
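Everything from the ResourceChecker line to this point is HBaseTestingUtility bringing up a two-datanode mini DFS for TestLogRolling. A minimal sketch of the startup call implied by the StartMiniClusterOption{...} line, assuming the HBase 2.x test API (option values are from the log; the surrounding scaffolding class is hypothetical):

```java
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterStartupSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    // Mirrors StartMiniClusterOption{numMasters=1, numRegionServers=1,
    // numDataNodes=2, numZkServers=1, ...} from the log above.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);
    try {
      // A real test (e.g. testSlowSyncLogRolling) would run against the
      // mini cluster here.
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
```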
2024-12-03T13:06:01,046 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/cluster_e02847aa-a639-dbe2-f596-1121a5b8dbf4/dfs/data/data3/current/BP-1197494549-172.17.0.2-1733231157423/current, will proceed with Du for space computation calculation,
2024-12-03T13:06:01,046 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/cluster_e02847aa-a639-dbe2-f596-1121a5b8dbf4/dfs/data/data2/current/BP-1197494549-172.17.0.2-1733231157423/current, will proceed with Du for space computation calculation,
2024-12-03T13:06:01,046 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/cluster_e02847aa-a639-dbe2-f596-1121a5b8dbf4/dfs/data/data4/current/BP-1197494549-172.17.0.2-1733231157423/current, will proceed with Du for space computation calculation,
2024-12-03T13:06:01,046 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/cluster_e02847aa-a639-dbe2-f596-1121a5b8dbf4/dfs/data/data1/current/BP-1197494549-172.17.0.2-1733231157423/current, will proceed with Du for space computation calculation,
2024-12-03T13:06:01,110 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-03T13:06:01,111 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-03T13:06:01,177 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xaa5ee5033d70294a with lease ID 0xc22daa1eab805b0d: Processing first storage report for DS-584a9991-ef4e-4020-be2d-f5c069507376 from datanode DatanodeRegistration(127.0.0.1:45207, datanodeUuid=a4f432e0-94c2-4c91-abbb-cdae87e4ad4c, infoPort=41431, infoSecurePort=0, ipcPort=41241, storageInfo=lv=-57;cid=testClusterID;nsid=446740066;c=1733231157423)
2024-12-03T13:06:01,179 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaa5ee5033d70294a with lease ID 0xc22daa1eab805b0d: from storage DS-584a9991-ef4e-4020-be2d-f5c069507376 node DatanodeRegistration(127.0.0.1:45207, datanodeUuid=a4f432e0-94c2-4c91-abbb-cdae87e4ad4c, infoPort=41431, infoSecurePort=0, ipcPort=41241, storageInfo=lv=-57;cid=testClusterID;nsid=446740066;c=1733231157423), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0
2024-12-03T13:06:01,180 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x10a0c338ef8e6227 with lease ID 0xc22daa1eab805b0c: Processing first storage report for DS-6473a138-8ded-4e00-a9ff-915b1763b253 from datanode DatanodeRegistration(127.0.0.1:43439, datanodeUuid=d3e3b013-4b2c-4727-8381-1b9b0b18c190, infoPort=34175, infoSecurePort=0, ipcPort=46249, storageInfo=lv=-57;cid=testClusterID;nsid=446740066;c=1733231157423)
2024-12-03T13:06:01,180 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x10a0c338ef8e6227 with lease ID 0xc22daa1eab805b0c: from storage DS-6473a138-8ded-4e00-a9ff-915b1763b253 node DatanodeRegistration(127.0.0.1:43439, datanodeUuid=d3e3b013-4b2c-4727-8381-1b9b0b18c190, infoPort=34175, infoSecurePort=0, ipcPort=46249, storageInfo=lv=-57;cid=testClusterID;nsid=446740066;c=1733231157423), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-03T13:06:01,180 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xaa5ee5033d70294a with lease ID 0xc22daa1eab805b0d: Processing first storage report for DS-0f00de5f-0af0-4779-9b71-f26f2052f469 from datanode DatanodeRegistration(127.0.0.1:45207, datanodeUuid=a4f432e0-94c2-4c91-abbb-cdae87e4ad4c, infoPort=41431, infoSecurePort=0, ipcPort=41241, storageInfo=lv=-57;cid=testClusterID;nsid=446740066;c=1733231157423)
2024-12-03T13:06:01,180 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaa5ee5033d70294a with lease ID 0xc22daa1eab805b0d: from storage DS-0f00de5f-0af0-4779-9b71-f26f2052f469 node DatanodeRegistration(127.0.0.1:45207, datanodeUuid=a4f432e0-94c2-4c91-abbb-cdae87e4ad4c, infoPort=41431, infoSecurePort=0, ipcPort=41241, storageInfo=lv=-57;cid=testClusterID;nsid=446740066;c=1733231157423), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-03T13:06:01,181 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x10a0c338ef8e6227 with lease ID 0xc22daa1eab805b0c: Processing first storage report for DS-3f85ab31-b4a0-4fab-b595-874684bc95f1 from datanode DatanodeRegistration(127.0.0.1:43439, datanodeUuid=d3e3b013-4b2c-4727-8381-1b9b0b18c190, infoPort=34175, infoSecurePort=0, ipcPort=46249, storageInfo=lv=-57;cid=testClusterID;nsid=446740066;c=1733231157423)
2024-12-03T13:06:01,181 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x10a0c338ef8e6227 with lease ID 0xc22daa1eab805b0c: from storage DS-3f85ab31-b4a0-4fab-b595-874684bc95f1 node DatanodeRegistration(127.0.0.1:43439, datanodeUuid=d3e3b013-4b2c-4727-8381-1b9b0b18c190, infoPort=34175, infoSecurePort=0, ipcPort=46249, storageInfo=lv=-57;cid=testClusterID;nsid=446740066;c=1733231157423), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-03T13:06:01,271 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc
2024-12-03T13:06:01,375 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/cluster_e02847aa-a639-dbe2-f596-1121a5b8dbf4/zookeeper_0, clientPort=58576, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/cluster_e02847aa-a639-dbe2-f596-1121a5b8dbf4/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/cluster_e02847aa-a639-dbe2-f596-1121a5b8dbf4/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-03T13:06:01,387 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=58576
2024-12-03T13:06:01,397 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:06:01,400 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:06:01,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741825_1001 (size=7)
2024-12-03T13:06:01,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741825_1001 (size=7)
2024-12-03T13:06:02,098 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138 with version=8
2024-12-03T13:06:02,099 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/hbase-staging
2024-12-03T13:06:02,238 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-03T13:06:02,564 INFO [Time-limited test {}] client.ConnectionUtils(129): master/35946657df09:0 server-side Connection retries=45
2024-12-03T13:06:02,586 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-03T13:06:02,587 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-03T13:06:02,587 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-03T13:06:02,588 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-03T13:06:02,588 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-03T13:06:02,743 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-03T13:06:02,818 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-12-03T13:06:02,829 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-12-03T13:06:02,834 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-03T13:06:02,871 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 5282 (auto-detected)
2024-12-03T13:06:02,873 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-12-03T13:06:02,906 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:38233
2024-12-03T13:06:02,917 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:06:02,919 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:06:02,936 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:38233 connecting to ZooKeeper ensemble=127.0.0.1:58576
2024-12-03T13:06:03,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:382330x0, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-03T13:06:03,063 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38233-0x1019c9652780000 connected
2024-12-03T13:06:03,185 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-03T13:06:03,192 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-03T13:06:03,196 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-03T13:06:03,202 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38233
2024-12-03T13:06:03,203 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38233
2024-12-03T13:06:03,203 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38233
2024-12-03T13:06:03,204 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38233
2024-12-03T13:06:03,204 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38233
2024-12-03T13:06:03,212 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138, hbase.cluster.distributed=false
2024-12-03T13:06:03,294 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/35946657df09:0 server-side Connection retries=45
2024-12-03T13:06:03,294 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-03T13:06:03,295 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-03T13:06:03,295 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-03T13:06:03,295 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-03T13:06:03,296 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-03T13:06:03,299 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-03T13:06:03,302 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-03T13:06:03,304 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:40603
2024-12-03T13:06:03,306 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-03T13:06:03,315 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-03T13:06:03,317 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:06:03,323 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:06:03,330 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:40603 connecting to ZooKeeper ensemble=127.0.0.1:58576
2024-12-03T13:06:03,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:406030x0, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-03T13:06:03,341 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:406030x0, quorum=127.0.0.1:58576, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-03T13:06:03,345 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:406030x0, quorum=127.0.0.1:58576, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-03T13:06:03,346 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:406030x0, quorum=127.0.0.1:58576, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-03T13:06:03,351 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40603-0x1019c9652780001 connected
2024-12-03T13:06:03,352 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40603
2024-12-03T13:06:03,352 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40603
2024-12-03T13:06:03,359 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40603
2024-12-03T13:06:03,362 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40603
2024-12-03T13:06:03,363 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40603
2024-12-03T13:06:03,367 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/35946657df09,38233,1733231162231
2024-12-03T13:06:03,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40603-0x1019c9652780001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T13:06:03,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T13:06:03,384 DEBUG [M:0;35946657df09:38233 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;35946657df09:38233
2024-12-03T13:06:03,384 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/35946657df09,38233,1733231162231
2024-12-03T13:06:03,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-03T13:06:03,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
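The RpcExecutor/RWQueueRpcExecutor lines above describe the call-queue layout the master and region server start with: three default handlers each, queues capped at 30 calls, and the priority executor split into one write and two read handlers. As a hedged sketch only: the key names below are standard HBase configuration keys, the values are chosen to mirror what this mini cluster logged, and the exact mapping from keys to the per-executor handler counts is simplified.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RpcQueueConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // handlerCount=3 in the "Instantiated default.FPBQ.Fifo" lines.
    conf.setInt("hbase.regionserver.handler.count", 3);
    // maxQueueLength=30 (by default this limit is handler.count * 10).
    conf.setInt("hbase.ipc.server.max.callqueue.length", 30);
    // A non-zero read ratio splits call queues into separate read and write
    // sets; a similar read/write split shows up in the RWQueueRpcExecutor
    // line above (writeHandlers=1, readHandlers=2).
    conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f);
    System.out.println("handlers=" + conf.getInt("hbase.regionserver.handler.count", -1));
  }
}
```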
2024-12-03T13:06:03,417 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-03T13:06:03,417 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-03T13:06:03,417 INFO [master/35946657df09:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/35946657df09,38233,1733231162231 from backup master directory
2024-12-03T13:06:03,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40603-0x1019c9652780001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-03T13:06:03,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40603-0x1019c9652780001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:06:03,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/35946657df09,38233,1733231162231
2024-12-03T13:06:03,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40603-0x1019c9652780001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T13:06:03,433 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T13:06:03,434 WARN [master/35946657df09:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-03T13:06:03,434 INFO [master/35946657df09:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=35946657df09,38233,1733231162231
2024-12-03T13:06:03,436 INFO [master/35946657df09:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-12-03T13:06:03,447 INFO [master/35946657df09:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-12-03T13:06:03,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741826_1002 (size=42)
2024-12-03T13:06:03,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741826_1002 (size=42)
2024-12-03T13:06:04,033 DEBUG [master/35946657df09:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/hbase.id with ID: c5a2afcb-36e6-4923-8654-110a5ec813f4
2024-12-03T13:06:04,104 INFO [master/35946657df09:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:06:04,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:06:04,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40603-0x1019c9652780001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:06:04,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741827_1003 (size=196)
2024-12-03T13:06:04,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741827_1003 (size=196)
2024-12-03T13:06:04,236 INFO [master/35946657df09:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-03T13:06:04,239 INFO [master/35946657df09:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-03T13:06:04,248 INFO [master/35946657df09:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-03T13:06:04,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741828_1004 (size=1189)
2024-12-03T13:06:04,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741828_1004 (size=1189)
2024-12-03T13:06:04,385 INFO [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/MasterData/data/master/store
2024-12-03T13:06:04,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741829_1005 (size=34)
2024-12-03T13:06:04,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741829_1005 (size=34)
2024-12-03T13:06:04,886 INFO [master/35946657df09:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
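The MasterRegion/HRegion lines above show the local 'master:store' region being created from a table descriptor with four column families (info, proc, rs, state). As an illustration, here is a sketch of building the 'info' family descriptor with the attributes printed above, using the HBase 2.x ColumnFamilyDescriptorBuilder API; the builder calls are real API, but treating this as the exact code path MasterRegion uses would be an assumption.

```java
import org.apache.hadoop.hbase.KeepDeletedCells;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreFamilySketch {
  public static void main(String[] args) {
    // Mirrors {NAME => 'info', VERSIONS => '3',
    // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOOMFILTER => 'ROWCOL',
    // IN_MEMORY => 'true', BLOCKSIZE => '8192 B (8KB)', ...} above.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setKeepDeletedCells(KeepDeletedCells.FALSE)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();
    System.out.println(info);
  }
}
```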
2024-12-03T13:06:04,887 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T13:06:04,889 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-03T13:06:04,889 INFO [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T13:06:04,889 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T13:06:04,890 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-03T13:06:04,890 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T13:06:04,890 INFO [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T13:06:04,890 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-03T13:06:04,893 WARN [master/35946657df09:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/MasterData/data/master/store/.initializing
2024-12-03T13:06:04,894 DEBUG [master/35946657df09:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/MasterData/WALs/35946657df09,38233,1733231162231
2024-12-03T13:06:04,917 INFO [master/35946657df09:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=35946657df09%2C38233%2C1733231162231, suffix=, logDir=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/MasterData/WALs/35946657df09,38233,1733231162231, archiveDir=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/MasterData/oldWALs, maxLogs=10
2024-12-03T13:06:04,934 INFO [master/35946657df09:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C38233%2C1733231162231.1733231164927
2024-12-03T13:06:04,935 DEBUG [master/35946657df09:0:becomeActiveMaster {}] util.CommonFSUtils$DfsBuilderUtility(752): Using builder API via reflection for DFS file creation replicate flag.
2024-12-03T13:06:04,935 DEBUG [master/35946657df09:0:becomeActiveMaster {}] util.CommonFSUtils$DfsBuilderUtility(762): Using builder API via reflection for DFS file creation noLocalWrite flag.
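The AbstractFSWAL lines describe the master-region WAL being created through WALFactory with the FSHLogProvider named earlier (blocksize=256 MB, rollsize=128 MB, i.e. rolling at half the block size). A minimal sketch of obtaining a WAL through that factory, assuming the HBase 2.x WALFactory API; the factory id and table name are placeholders, not values from this run:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;

public class WalFactorySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // "filesystem" selects FSHLogProvider, the provider named in the
    // "Instantiating WALProvider" line above.
    conf.set("hbase.wal.provider", "filesystem");
    WALFactory factory = new WALFactory(conf, "example-factory-id");
    RegionInfo region = RegionInfoBuilder
        .newBuilder(TableName.valueOf("example"))
        .build();
    WAL wal = factory.getWAL(region); // one WAL per provider grouping
    System.out.println(wal);
    factory.close();
  }
}
```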
2024-12-03T13:06:05,003 INFO [master/35946657df09:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/MasterData/WALs/35946657df09,38233,1733231162231/35946657df09%2C38233%2C1733231162231.1733231164927
2024-12-03T13:06:05,012 DEBUG [master/35946657df09:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41431:41431),(127.0.0.1/127.0.0.1:34175:34175)]
2024-12-03T13:06:05,013 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-12-03T13:06:05,013 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T13:06:05,019 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:06:05,022 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:06:05,076 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:06:05,109 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-12-03T13:06:05,115 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:06:05,118 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-03T13:06:05,119 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:06:05,123 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-12-03T13:06:05,123 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:06:05,125 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-03T13:06:05,125 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:06:05,131 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-12-03T13:06:05,131 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:06:05,133 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-03T13:06:05,133 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:06:05,137 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-12-03T13:06:05,137 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:06:05,139 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-03T13:06:05,143 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:06:05,145 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:06:05,157 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
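The fallback in the last entry is plain arithmetic: the region flush size is split evenly across the stores. With flushSize=134217728 injected earlier and the four column families info, proc, rs and state:

    134217728 B / 4 families = 33554432 B = 32 MB per family

which matches the flushSizeLowerBound=33554432 reported when the region opens below.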
2024-12-03T13:06:05,162 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:06:05,168 DEBUG [master/35946657df09:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-03T13:06:05,171 INFO [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=718046, jitterRate=-0.0869574248790741}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-12-03T13:06:05,176 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-03T13:06:05,178 INFO [master/35946657df09:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-12-03T13:06:05,208 DEBUG [master/35946657df09:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c934baa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-03T13:06:05,250 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating...
2024-12-03T13:06:05,266 INFO [master/35946657df09:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-12-03T13:06:05,266 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-12-03T13:06:05,269 INFO [master/35946657df09:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-12-03T13:06:05,275 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 6 msec
2024-12-03T13:06:05,282 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 6 msec
2024-12-03T13:06:05,282 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-12-03T13:06:05,320 INFO [master/35946657df09:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
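The procedure executor's five core workers mirror the region procedure store's "number threads=5" above. A hedged sketch, assuming the hbase.master.procedure.threads key that conventionally controls the core pool:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ProcedureWorkerSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // 5 matches "number threads=5" in the log; the executor can still burst
            // above this core pool, up to the max worker count of 50 reported there.
            conf.setInt("hbase.master.procedure.threads", 5);
        }
    }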
2024-12-03T13:06:05,347 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-12-03T13:06:05,367 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false
2024-12-03T13:06:05,370 INFO [master/35946657df09:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-12-03T13:06:05,375 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-12-03T13:06:05,381 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false
2024-12-03T13:06:05,384 INFO [master/35946657df09:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-12-03T13:06:05,391 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-12-03T13:06:05,398 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false
2024-12-03T13:06:05,401 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-12-03T13:06:05,447 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false
2024-12-03T13:06:05,472 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-12-03T13:06:05,482 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-12-03T13:06:05,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-03T13:06:05,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40603-0x1019c9652780001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-03T13:06:05,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:06:05,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40603-0x1019c9652780001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:06:05,525 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=35946657df09,38233,1733231162231, sessionid=0x1019c9652780000, setting cluster-up flag (Was=false)
2024-12-03T13:06:05,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:06:05,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40603-0x1019c9652780001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:06:05,623 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-12-03T13:06:05,625 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=35946657df09,38233,1733231162231
2024-12-03T13:06:05,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:06:05,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40603-0x1019c9652780001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:06:05,665 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-12-03T13:06:05,667 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=35946657df09,38233,1733231162231
2024-12-03T13:06:05,766 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta
2024-12-03T13:06:05,773 INFO [master/35946657df09:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2
2024-12-03T13:06:05,776 INFO [master/35946657df09:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
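The StochasticLoadBalancer echoes its tuning back at startup. A sketch that reproduces the logged values, assuming the hbase.master.balancer.stochastic.* key names the balancer conventionally reads:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.master.balancer.stochastic.maxSteps", 1000000L);
            conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
            conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
            conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30000L); // ms
        }
    }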
2024-12-03T13:06:05,782 DEBUG [RS:0;35946657df09:40603 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;35946657df09:40603
2024-12-03T13:06:05,784 INFO [RS:0;35946657df09:40603 {}] regionserver.HRegionServer(1008): ClusterId : c5a2afcb-36e6-4923-8654-110a5ec813f4
2024-12-03T13:06:05,783 DEBUG [master/35946657df09:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 35946657df09,38233,1733231162231 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
2024-12-03T13:06:05,787 DEBUG [RS:0;35946657df09:40603 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-12-03T13:06:05,787 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/35946657df09:0, corePoolSize=5, maxPoolSize=5
2024-12-03T13:06:05,787 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/35946657df09:0, corePoolSize=5, maxPoolSize=5
2024-12-03T13:06:05,787 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/35946657df09:0, corePoolSize=5, maxPoolSize=5
2024-12-03T13:06:05,787 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/35946657df09:0, corePoolSize=5, maxPoolSize=5
2024-12-03T13:06:05,788 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/35946657df09:0, corePoolSize=10, maxPoolSize=10
2024-12-03T13:06:05,788 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:06:05,788 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/35946657df09:0, corePoolSize=2, maxPoolSize=2
2024-12-03T13:06:05,788 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:06:05,789 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733231195789
2024-12-03T13:06:05,790 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-12-03T13:06:05,792 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-12-03T13:06:05,792 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta
2024-12-03T13:06:05,793 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region
2024-12-03T13:06:05,795 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-12-03T13:06:05,796 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-12-03T13:06:05,796 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-12-03T13:06:05,796 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-12-03T13:06:05,797 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:06:05,797 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-03T13:06:05,797 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-03T13:06:05,799 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-12-03T13:06:05,801 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-12-03T13:06:05,802 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-12-03T13:06:05,802 DEBUG [RS:0;35946657df09:40603 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-12-03T13:06:05,802 DEBUG [RS:0;35946657df09:40603 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-12-03T13:06:05,804 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-12-03T13:06:05,805 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-12-03T13:06:05,808 DEBUG [RS:0;35946657df09:40603 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-12-03T13:06:05,808 DEBUG [master/35946657df09:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/35946657df09:0:becomeActiveMaster-HFileCleaner.large.0-1733231165806,5,FailOnTimeoutGroup]
2024-12-03T13:06:05,809 DEBUG [RS:0;35946657df09:40603 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6843f6a1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-03T13:06:05,809 DEBUG [master/35946657df09:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/35946657df09:0:becomeActiveMaster-HFileCleaner.small.0-1733231165808,5,FailOnTimeoutGroup]
2024-12-03T13:06:05,809 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-03T13:06:05,809 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-12-03T13:06:05,810 DEBUG [RS:0;35946657df09:40603 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d04a651, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=35946657df09/172.17.0.2:0
2024-12-03T13:06:05,811 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-12-03T13:06:05,811 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
2024-12-03T13:06:05,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741831_1007 (size=1039)
2024-12-03T13:06:05,814 INFO [RS:0;35946657df09:40603 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled
2024-12-03T13:06:05,814 INFO [RS:0;35946657df09:40603 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled
2024-12-03T13:06:05,814 DEBUG [RS:0;35946657df09:40603 {}] regionserver.HRegionServer(1090): About to register with Master.
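A long run of ScheduledChore registrations surrounds these entries (LogsCleaner and HFileCleaner above, and the region server's own chores later). A minimal sketch of the chore mechanism itself, assuming the public ChoreService/ScheduledChore API; the names and the println body are illustrative only:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
        public static void main(String[] args) throws InterruptedException {
            // Trivial stop flag; the real server passes itself as the Stoppable.
            Stoppable stopper = new Stoppable() {
                private volatile boolean stopped;
                @Override public void stop(String why) { stopped = true; }
                @Override public boolean isStopped() { return stopped; }
            };
            ChoreService service = new ChoreService("demo");
            // Mirrors the "ScheduledChore name=..., period=600000, unit=MILLISECONDS" entries.
            service.scheduleChore(new ScheduledChore("LogsCleaner-demo", stopper, 600000) {
                @Override protected void chore() {
                    System.out.println("cleaner tick"); // real chores scan and delete old files
                }
            });
            Thread.sleep(100);
            stopper.stop("done");
            service.shutdown();
        }
    }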
2024-12-03T13:06:05,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741831_1007 (size=1039)
2024-12-03T13:06:05,816 INFO [RS:0;35946657df09:40603 {}] regionserver.HRegionServer(3073): reportForDuty to master=35946657df09,38233,1733231162231 with isa=35946657df09/172.17.0.2:40603, startcode=1733231163292
2024-12-03T13:06:05,821 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039
2024-12-03T13:06:05,821 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138
2024-12-03T13:06:05,828 DEBUG [RS:0;35946657df09:40603 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-03T13:06:05,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741832_1008 (size=32)
2024-12-03T13:06:05,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741832_1008 (size=32)
2024-12-03T13:06:05,841 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T13:06:05,847 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-03T13:06:05,849 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-03T13:06:05,849 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:06:05,850 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-03T13:06:05,851 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-03T13:06:05,853 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-03T13:06:05,853 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:06:05,854 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-03T13:06:05,854 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-03T13:06:05,857 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-03T13:06:05,858 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:06:05,859 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-03T13:06:05,861 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/hbase/meta/1588230740
2024-12-03T13:06:05,862 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/hbase/meta/1588230740
2024-12-03T13:06:05,864 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-03T13:06:05,867 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-03T13:06:05,871 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-03T13:06:05,875 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=793350, jitterRate=0.008797213435173035}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-03T13:06:05,879 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-12-03T13:06:05,879 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-03T13:06:05,879 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-03T13:06:05,879 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-03T13:06:05,879 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-03T13:06:05,879 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-03T13:06:05,881 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-03T13:06:05,881 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-03T13:06:05,884 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta
2024-12-03T13:06:05,884 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta
2024-12-03T13:06:05,889 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-12-03T13:06:05,897 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-03T13:06:05,899 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-12-03T13:06:05,903 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37285, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService
2024-12-03T13:06:05,909 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38233 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 35946657df09,40603,1733231163292
2024-12-03T13:06:05,911 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38233 {}] master.ServerManager(486): Registering regionserver=35946657df09,40603,1733231163292
2024-12-03T13:06:05,924 DEBUG [RS:0;35946657df09:40603 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138
2024-12-03T13:06:05,924 DEBUG [RS:0;35946657df09:40603 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:39447
2024-12-03T13:06:05,924 DEBUG [RS:0;35946657df09:40603 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1
2024-12-03T13:06:05,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-03T13:06:05,977 DEBUG [RS:0;35946657df09:40603 {}] zookeeper.ZKUtil(111): regionserver:40603-0x1019c9652780001, quorum=127.0.0.1:58576, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/35946657df09,40603,1733231163292
2024-12-03T13:06:05,977 WARN [RS:0;35946657df09:40603 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-03T13:06:05,978 INFO [RS:0;35946657df09:40603 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-03T13:06:05,978 DEBUG [RS:0;35946657df09:40603 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292
2024-12-03T13:06:05,979 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [35946657df09,40603,1733231163292]
2024-12-03T13:06:05,989 DEBUG [RS:0;35946657df09:40603 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds
2024-12-03T13:06:05,999 INFO [RS:0;35946657df09:40603 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-12-03T13:06:06,009 INFO [RS:0;35946657df09:40603 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-12-03T13:06:06,012 INFO [RS:0;35946657df09:40603 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-03T13:06:06,012 INFO [RS:0;35946657df09:40603 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-03T13:06:06,012 INFO [RS:0;35946657df09:40603 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S
2024-12-03T13:06:06,018 INFO [RS:0;35946657df09:40603 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
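The MemStoreFlusher entry pins down the relationship between its two thresholds:

    836 M (low-water mark) / 880 M (global limit) = 0.95

i.e. the low-water mark sits at 95% of the global memstore limit, and both are themselves derived as fractions of the region server heap (conventionally via hbase.regionserver.global.memstore.size and its .lower.limit companion; the key names are an assumption here, not something this log states).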
2024-12-03T13:06:06,018 DEBUG [RS:0;35946657df09:40603 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:06:06,018 DEBUG [RS:0;35946657df09:40603 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:06:06,019 DEBUG [RS:0;35946657df09:40603 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:06:06,019 DEBUG [RS:0;35946657df09:40603 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:06:06,019 DEBUG [RS:0;35946657df09:40603 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:06:06,019 DEBUG [RS:0;35946657df09:40603 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/35946657df09:0, corePoolSize=2, maxPoolSize=2
2024-12-03T13:06:06,019 DEBUG [RS:0;35946657df09:40603 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:06:06,019 DEBUG [RS:0;35946657df09:40603 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:06:06,019 DEBUG [RS:0;35946657df09:40603 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:06:06,019 DEBUG [RS:0;35946657df09:40603 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:06:06,020 DEBUG [RS:0;35946657df09:40603 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:06:06,020 DEBUG [RS:0;35946657df09:40603 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/35946657df09:0, corePoolSize=3, maxPoolSize=3
2024-12-03T13:06:06,020 DEBUG [RS:0;35946657df09:40603 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/35946657df09:0, corePoolSize=3, maxPoolSize=3
2024-12-03T13:06:06,021 INFO [RS:0;35946657df09:40603 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-12-03T13:06:06,021 INFO [RS:0;35946657df09:40603 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-12-03T13:06:06,021 INFO [RS:0;35946657df09:40603 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-12-03T13:06:06,021 INFO [RS:0;35946657df09:40603 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-12-03T13:06:06,021 INFO [RS:0;35946657df09:40603 {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,40603,1733231163292-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-03T13:06:06,038 INFO [RS:0;35946657df09:40603 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-12-03T13:06:06,040 INFO [RS:0;35946657df09:40603 {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,40603,1733231163292-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-03T13:06:06,049 WARN [35946657df09:38233 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions.
2024-12-03T13:06:06,059 INFO [RS:0;35946657df09:40603 {}] regionserver.Replication(204): 35946657df09,40603,1733231163292 started
2024-12-03T13:06:06,059 INFO [RS:0;35946657df09:40603 {}] regionserver.HRegionServer(1767): Serving as 35946657df09,40603,1733231163292, RpcServer on 35946657df09/172.17.0.2:40603, sessionid=0x1019c9652780001
2024-12-03T13:06:06,059 DEBUG [RS:0;35946657df09:40603 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-12-03T13:06:06,060 DEBUG [RS:0;35946657df09:40603 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 35946657df09,40603,1733231163292
2024-12-03T13:06:06,060 DEBUG [RS:0;35946657df09:40603 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '35946657df09,40603,1733231163292'
2024-12-03T13:06:06,060 DEBUG [RS:0;35946657df09:40603 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-12-03T13:06:06,061 DEBUG [RS:0;35946657df09:40603 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-12-03T13:06:06,061 DEBUG [RS:0;35946657df09:40603 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-12-03T13:06:06,061 DEBUG [RS:0;35946657df09:40603 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-12-03T13:06:06,062 DEBUG [RS:0;35946657df09:40603 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 35946657df09,40603,1733231163292
2024-12-03T13:06:06,062 DEBUG [RS:0;35946657df09:40603 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '35946657df09,40603,1733231163292'
2024-12-03T13:06:06,062 DEBUG [RS:0;35946657df09:40603 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-12-03T13:06:06,062 DEBUG [RS:0;35946657df09:40603 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-03T13:06:06,063 DEBUG [RS:0;35946657df09:40603 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-03T13:06:06,063 INFO [RS:0;35946657df09:40603 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-03T13:06:06,063 INFO [RS:0;35946657df09:40603 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
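Both quota managers report themselves disabled in the last two entries. A sketch of switching quota support on, assuming the hbase.quota.enabled key; everything beyond the switch would then be configured through the admin API:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setBoolean("hbase.quota.enabled", true); // off by default, as the log shows
        }
    }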
2024-12-03T13:06:06,170 INFO [RS:0;35946657df09:40603 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=35946657df09%2C40603%2C1733231163292, suffix=, logDir=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292, archiveDir=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/oldWALs, maxLogs=32
2024-12-03T13:06:06,173 INFO [RS:0;35946657df09:40603 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C40603%2C1733231163292.1733231166173
2024-12-03T13:06:06,183 INFO [RS:0;35946657df09:40603 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231166173
2024-12-03T13:06:06,183 DEBUG [RS:0;35946657df09:40603 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41431:41431),(127.0.0.1/127.0.0.1:34175:34175)]
2024-12-03T13:06:06,301 DEBUG [35946657df09:38233 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-12-03T13:06:06,305 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=35946657df09,40603,1733231163292
2024-12-03T13:06:06,309 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 35946657df09,40603,1733231163292, state=OPENING
2024-12-03T13:06:06,323 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-12-03T13:06:06,331 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40603-0x1019c9652780001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:06:06,331 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:06:06,332 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-03T13:06:06,332 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-03T13:06:06,334 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=35946657df09,40603,1733231163292}]
2024-12-03T13:06:06,504 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 35946657df09,40603,1733231163292
2024-12-03T13:06:06,506 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-03T13:06:06,509 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42866, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-03T13:06:06,522 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740
2024-12-03T13:06:06,523 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-03T13:06:06,526 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=35946657df09%2C40603%2C1733231163292.meta, suffix=.meta, logDir=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292, archiveDir=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/oldWALs, maxLogs=32
2024-12-03T13:06:06,528 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C40603%2C1733231163292.meta.1733231166528.meta
2024-12-03T13:06:06,537 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.meta.1733231166528.meta
2024-12-03T13:06:06,537 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41431:41431),(127.0.0.1/127.0.0.1:34175:34175)]
2024-12-03T13:06:06,537 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-12-03T13:06:06,539 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-12-03T13:06:06,588 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-12-03T13:06:06,591 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
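The endpoint is loaded from the table descriptor (HTD), and the logged priority 536870911 is Integer.MAX_VALUE / 4, i.e. the system coprocessor priority. A sketch of how such an endpoint is declared on a descriptor, assuming the TableDescriptorBuilder API; the table name "demo" is a placeholder:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorSketch {
        public static void main(String[] args) throws IOException {
            // Attaches the same endpoint the meta descriptor above carries.
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("demo"))
                .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
                .build();
            System.out.println(td); // the coprocessor shows up as a table attribute
        }
    }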
2024-12-03T13:06:06,596 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-12-03T13:06:06,596 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T13:06:06,596 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740
2024-12-03T13:06:06,596 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740
2024-12-03T13:06:06,599 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-03T13:06:06,600 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-03T13:06:06,600 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:06:06,601 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-03T13:06:06,601 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-03T13:06:06,603 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-03T13:06:06,603 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:06:06,603 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-03T13:06:06,603 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-03T13:06:06,605 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-03T13:06:06,605 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:06:06,605 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-03T13:06:06,607 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/hbase/meta/1588230740
2024-12-03T13:06:06,609 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/hbase/meta/1588230740
2024-12-03T13:06:06,612 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-03T13:06:06,615 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-03T13:06:06,616 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=845274, jitterRate=0.07482261955738068}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-03T13:06:06,618 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-12-03T13:06:06,624 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733231166499
2024-12-03T13:06:06,634 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740
2024-12-03T13:06:06,634 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740
2024-12-03T13:06:06,635 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=35946657df09,40603,1733231163292
2024-12-03T13:06:06,637 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 35946657df09,40603,1733231163292, state=OPEN
2024-12-03T13:06:06,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40603-0x1019c9652780001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-03T13:06:06,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-03T13:06:06,687 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-03T13:06:06,687 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-03T13:06:06,693 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2
2024-12-03T13:06:06,693 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=35946657df09,40603,1733231163292 in 353 msec
2024-12-03T13:06:06,699 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1
2024-12-03T13:06:06,699 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 804 msec
2024-12-03T13:06:06,706 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 989 msec
2024-12-03T13:06:06,706 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733231166706, completionTime=-1
2024-12-03T13:06:06,706 INFO [master/35946657df09:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running
2024-12-03T13:06:06,706 DEBUG [master/35946657df09:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster...
2024-12-03T13:06:06,737 DEBUG [hconnection-0x70d09903-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-03T13:06:06,739 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42876, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-03T13:06:06,749 INFO [master/35946657df09:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1
2024-12-03T13:06:06,749 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733231226749
2024-12-03T13:06:06,750 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733231286750
2024-12-03T13:06:06,750 INFO [master/35946657df09:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 43 msec
2024-12-03T13:06:06,818 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,38233,1733231162231-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-03T13:06:06,819 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,38233,1733231162231-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-03T13:06:06,819 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,38233,1733231162231-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-03T13:06:06,820 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-35946657df09:38233, period=300000, unit=MILLISECONDS is enabled.
2024-12-03T13:06:06,821 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-12-03T13:06:06,826 DEBUG [master/35946657df09:0.Chore.1 {}] janitor.CatalogJanitor(179):
2024-12-03T13:06:06,829 INFO [master/35946657df09:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating...
2024-12-03T13:06:06,830 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-03T13:06:06,836 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace
2024-12-03T13:06:06,839 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION
2024-12-03T13:06:06,840 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:06:06,842 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-03T13:06:06,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741835_1011 (size=358)
2024-12-03T13:06:06,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741835_1011 (size=358)
2024-12-03T13:06:06,858 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => cbc38bc35014cb0cf824dc8d928df3b8, NAME => 'hbase:namespace,,1733231166830.cbc38bc35014cb0cf824dc8d928df3b8.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138
2024-12-03T13:06:06,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741836_1012 (size=42)
2024-12-03T13:06:06,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741836_1012 (size=42)
2024-12-03T13:06:07,272 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733231166830.cbc38bc35014cb0cf824dc8d928df3b8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T13:06:07,272 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing cbc38bc35014cb0cf824dc8d928df3b8, disabling compactions & flushes
2024-12-03T13:06:07,272 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733231166830.cbc38bc35014cb0cf824dc8d928df3b8.
2024-12-03T13:06:07,272 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733231166830.cbc38bc35014cb0cf824dc8d928df3b8.
2024-12-03T13:06:07,272 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733231166830.cbc38bc35014cb0cf824dc8d928df3b8. after waiting 0 ms
2024-12-03T13:06:07,273 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733231166830.cbc38bc35014cb0cf824dc8d928df3b8.
2024-12-03T13:06:07,273 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733231166830.cbc38bc35014cb0cf824dc8d928df3b8.
2024-12-03T13:06:07,273 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for cbc38bc35014cb0cf824dc8d928df3b8:
2024-12-03T13:06:07,275 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META
2024-12-03T13:06:07,283 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733231166830.cbc38bc35014cb0cf824dc8d928df3b8.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733231167276"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733231167276"}]},"ts":"1733231167276"}
2024-12-03T13:06:07,316 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta.
2024-12-03T13:06:07,318 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-03T13:06:07,322 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733231167318"}]},"ts":"1733231167318"}
2024-12-03T13:06:07,327 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta
2024-12-03T13:06:07,383 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=cbc38bc35014cb0cf824dc8d928df3b8, ASSIGN}]
2024-12-03T13:06:07,386 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=cbc38bc35014cb0cf824dc8d928df3b8, ASSIGN
2024-12-03T13:06:07,389 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=cbc38bc35014cb0cf824dc8d928df3b8, ASSIGN; state=OFFLINE, location=35946657df09,40603,1733231163292; forceNewPlan=false, retain=false
2024-12-03T13:06:07,540 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=cbc38bc35014cb0cf824dc8d928df3b8, regionState=OPENING, regionLocation=35946657df09,40603,1733231163292
2024-12-03T13:06:07,556 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure cbc38bc35014cb0cf824dc8d928df3b8, server=35946657df09,40603,1733231163292}]
2024-12-03T13:06:07,713 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 35946657df09,40603,1733231163292
2024-12-03T13:06:07,720 INFO [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733231166830.cbc38bc35014cb0cf824dc8d928df3b8.
2024-12-03T13:06:07,720 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => cbc38bc35014cb0cf824dc8d928df3b8, NAME => 'hbase:namespace,,1733231166830.cbc38bc35014cb0cf824dc8d928df3b8.', STARTKEY => '', ENDKEY => ''}
2024-12-03T13:06:07,721 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace cbc38bc35014cb0cf824dc8d928df3b8
2024-12-03T13:06:07,721 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733231166830.cbc38bc35014cb0cf824dc8d928df3b8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T13:06:07,721 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for cbc38bc35014cb0cf824dc8d928df3b8
2024-12-03T13:06:07,721 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for cbc38bc35014cb0cf824dc8d928df3b8
2024-12-03T13:06:07,732 INFO [StoreOpener-cbc38bc35014cb0cf824dc8d928df3b8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region cbc38bc35014cb0cf824dc8d928df3b8
2024-12-03T13:06:07,747 INFO [StoreOpener-cbc38bc35014cb0cf824dc8d928df3b8-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cbc38bc35014cb0cf824dc8d928df3b8 columnFamilyName info
2024-12-03T13:06:07,748 DEBUG [StoreOpener-cbc38bc35014cb0cf824dc8d928df3b8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:06:07,766 INFO [StoreOpener-cbc38bc35014cb0cf824dc8d928df3b8-1 {}] regionserver.HStore(327): Store=cbc38bc35014cb0cf824dc8d928df3b8/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-03T13:06:07,771 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/hbase/namespace/cbc38bc35014cb0cf824dc8d928df3b8
2024-12-03T13:06:07,783 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/hbase/namespace/cbc38bc35014cb0cf824dc8d928df3b8
2024-12-03T13:06:07,819 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for cbc38bc35014cb0cf824dc8d928df3b8
2024-12-03T13:06:07,842 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/hbase/namespace/cbc38bc35014cb0cf824dc8d928df3b8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-03T13:06:07,844 INFO [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened cbc38bc35014cb0cf824dc8d928df3b8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=779243, jitterRate=-0.009141907095909119}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-03T13:06:07,846 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for cbc38bc35014cb0cf824dc8d928df3b8:
2024-12-03T13:06:07,851 INFO [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733231166830.cbc38bc35014cb0cf824dc8d928df3b8., pid=6, masterSystemTime=1733231167712
2024-12-03T13:06:07,860 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733231166830.cbc38bc35014cb0cf824dc8d928df3b8.
2024-12-03T13:06:07,861 INFO [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733231166830.cbc38bc35014cb0cf824dc8d928df3b8.
2024-12-03T13:06:07,863 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=cbc38bc35014cb0cf824dc8d928df3b8, regionState=OPEN, openSeqNum=2, regionLocation=35946657df09,40603,1733231163292
2024-12-03T13:06:07,871 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38233 {}] assignment.AssignmentManager(1526): Unable to acquire lock for regionNode state=OPEN, location=35946657df09,40603,1733231163292, table=hbase:namespace, region=cbc38bc35014cb0cf824dc8d928df3b8. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now.
2024-12-03T13:06:07,887 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5
2024-12-03T13:06:07,889 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure cbc38bc35014cb0cf824dc8d928df3b8, server=35946657df09,40603,1733231163292 in 321 msec
2024-12-03T13:06:07,894 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4
2024-12-03T13:06:07,894 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=cbc38bc35014cb0cf824dc8d928df3b8, ASSIGN in 504 msec
2024-12-03T13:06:07,896 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-03T13:06:07,896 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733231167896"}]},"ts":"1733231167896"}
2024-12-03T13:06:07,900 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta
2024-12-03T13:06:07,950 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace
2024-12-03T13:06:07,951 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION
2024-12-03T13:06:07,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40603-0x1019c9652780001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:06:07,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace
2024-12-03T13:06:07,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:06:07,957 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.1200 sec
2024-12-03T13:06:08,021 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default
2024-12-03T13:06:08,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace
2024-12-03T13:06:08,112 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 102 msec
2024-12-03T13:06:08,120 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase
2024-12-03T13:06:08,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace
2024-12-03T13:06:08,170 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 48 msec
2024-12-03T13:06:08,207 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default
2024-12-03T13:06:08,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase
2024-12-03T13:06:08,240 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 4.805sec
2024-12-03T13:06:08,242 INFO [master/35946657df09:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-12-03T13:06:08,244 INFO [master/35946657df09:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-12-03T13:06:08,245 INFO [master/35946657df09:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-12-03T13:06:08,246 INFO [master/35946657df09:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-12-03T13:06:08,246 INFO [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-12-03T13:06:08,247 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,38233,1733231162231-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-03T13:06:08,247 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,38233,1733231162231-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-12-03T13:06:08,271 DEBUG [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds
2024-12-03T13:06:08,273 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-12-03T13:06:08,273 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,38233,1733231162231-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-03T13:06:08,300 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x260e636b to 127.0.0.1:58576 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@64019975
2024-12-03T13:06:08,300 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry
2024-12-03T13:06:08,320 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d512531, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-03T13:06:08,324 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false
2024-12-03T13:06:08,324 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512
2024-12-03T13:06:08,347 DEBUG [hconnection-0x4298bb96-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-03T13:06:08,420 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41184, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-03T13:06:08,435 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=35946657df09,38233,1733231162231
2024-12-03T13:06:08,436 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:06:08,458 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false
2024-12-03T13:06:08,468 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false
2024-12-03T13:06:08,487 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48824, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-12-03T13:06:08,506 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38233 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions.
2024-12-03T13:06:08,507 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38233 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing.
2024-12-03T13:06:08,511 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38233 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-03T13:06:08,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38233 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling
2024-12-03T13:06:08,518 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION
2024-12-03T13:06:08,518 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38233 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 9
2024-12-03T13:06:08,518 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:06:08,521 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-03T13:06:08,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38233 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9
2024-12-03T13:06:08,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741837_1013 (size=389)
2024-12-03T13:06:08,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741837_1013 (size=389)
2024-12-03T13:06:08,958 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 8e221e153e39a23226788dac72ada430, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733231168506.8e221e153e39a23226788dac72ada430.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138
2024-12-03T13:06:09,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741838_1014 (size=72)
2024-12-03T13:06:09,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741838_1014 (size=72)
2024-12-03T13:06:09,046 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(894): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733231168506.8e221e153e39a23226788dac72ada430.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T13:06:09,046 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1681): Closing 8e221e153e39a23226788dac72ada430, disabling compactions & flushes
2024-12-03T13:06:09,047 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testSlowSyncLogRolling,,1733231168506.8e221e153e39a23226788dac72ada430.
2024-12-03T13:06:09,047 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testSlowSyncLogRolling,,1733231168506.8e221e153e39a23226788dac72ada430.
2024-12-03T13:06:09,047 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733231168506.8e221e153e39a23226788dac72ada430. after waiting 0 ms
2024-12-03T13:06:09,047 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733231168506.8e221e153e39a23226788dac72ada430.
2024-12-03T13:06:09,047 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testSlowSyncLogRolling,,1733231168506.8e221e153e39a23226788dac72ada430.
2024-12-03T13:06:09,047 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1635): Region close journal for 8e221e153e39a23226788dac72ada430:
2024-12-03T13:06:09,059 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META
2024-12-03T13:06:09,059 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1733231168506.8e221e153e39a23226788dac72ada430.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1733231169059"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733231169059"}]},"ts":"1733231169059"}
2024-12-03T13:06:09,082 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta.
2024-12-03T13:06:09,087 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-03T13:06:09,087 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733231169087"}]},"ts":"1733231169087"}
2024-12-03T13:06:09,101 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta
2024-12-03T13:06:09,132 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=8e221e153e39a23226788dac72ada430, ASSIGN}]
2024-12-03T13:06:09,135 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=8e221e153e39a23226788dac72ada430, ASSIGN
2024-12-03T13:06:09,137 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=8e221e153e39a23226788dac72ada430, ASSIGN; state=OFFLINE, location=35946657df09,40603,1733231163292; forceNewPlan=false, retain=false
2024-12-03T13:06:09,287 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=8e221e153e39a23226788dac72ada430, regionState=OPENING, regionLocation=35946657df09,40603,1733231163292
2024-12-03T13:06:09,292 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 8e221e153e39a23226788dac72ada430, server=35946657df09,40603,1733231163292}]
2024-12-03T13:06:09,447 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 35946657df09,40603,1733231163292
2024-12-03T13:06:09,467 INFO [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testSlowSyncLogRolling,,1733231168506.8e221e153e39a23226788dac72ada430.
2024-12-03T13:06:09,467 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 8e221e153e39a23226788dac72ada430, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733231168506.8e221e153e39a23226788dac72ada430.', STARTKEY => '', ENDKEY => ''}
2024-12-03T13:06:09,468 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 8e221e153e39a23226788dac72ada430
2024-12-03T13:06:09,468 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733231168506.8e221e153e39a23226788dac72ada430.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T13:06:09,468 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 8e221e153e39a23226788dac72ada430
2024-12-03T13:06:09,468 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 8e221e153e39a23226788dac72ada430
2024-12-03T13:06:09,491 INFO [StoreOpener-8e221e153e39a23226788dac72ada430-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 8e221e153e39a23226788dac72ada430
2024-12-03T13:06:09,517 INFO [StoreOpener-8e221e153e39a23226788dac72ada430-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8e221e153e39a23226788dac72ada430 columnFamilyName info
2024-12-03T13:06:09,518 DEBUG [StoreOpener-8e221e153e39a23226788dac72ada430-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:06:09,529 INFO [StoreOpener-8e221e153e39a23226788dac72ada430-1 {}] regionserver.HStore(327): Store=8e221e153e39a23226788dac72ada430/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-03T13:06:09,536 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430
2024-12-03T13:06:09,540 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430
2024-12-03T13:06:09,563 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 8e221e153e39a23226788dac72ada430
2024-12-03T13:06:09,573 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-03T13:06:09,575 INFO [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 8e221e153e39a23226788dac72ada430; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=718936, jitterRate=-0.0858256071805954}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-03T13:06:09,578 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 8e221e153e39a23226788dac72ada430:
2024-12-03T13:06:09,587 INFO [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1733231168506.8e221e153e39a23226788dac72ada430., pid=11, masterSystemTime=1733231169447
2024-12-03T13:06:09,596 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testSlowSyncLogRolling,,1733231168506.8e221e153e39a23226788dac72ada430.
2024-12-03T13:06:09,597 INFO [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testSlowSyncLogRolling,,1733231168506.8e221e153e39a23226788dac72ada430.
2024-12-03T13:06:09,598 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=8e221e153e39a23226788dac72ada430, regionState=OPEN, openSeqNum=2, regionLocation=35946657df09,40603,1733231163292
2024-12-03T13:06:09,618 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10
2024-12-03T13:06:09,619 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 8e221e153e39a23226788dac72ada430, server=35946657df09,40603,1733231163292 in 320 msec
2024-12-03T13:06:09,623 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9
2024-12-03T13:06:09,623 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=8e221e153e39a23226788dac72ada430, ASSIGN in 486 msec
2024-12-03T13:06:09,625 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-03T13:06:09,625 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733231169625"}]},"ts":"1733231169625"}
2024-12-03T13:06:09,636 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta
2024-12-03T13:06:09,653 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION
2024-12-03T13:06:09,662 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 1.1430 sec
2024-12-03T13:06:12,107 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties
2024-12-03T13:06:12,150 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-12-03T13:06:12,151 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace'
2024-12-03T13:06:12,152 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling'
2024-12-03T13:06:12,814 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-12-03T13:06:12,814 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-12-03T13:06:12,816 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling
2024-12-03T13:06:12,816 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer
2024-12-03T13:06:12,817 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace
2024-12-03T13:06:12,817 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer
2024-12-03T13:06:12,818 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-03T13:06:12,818 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-12-03T13:06:12,818 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-12-03T13:06:12,818 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-12-03T13:06:18,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38233 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9
2024-12-03T13:06:18,533 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling, procId: 9 completed
2024-12-03T13:06:18,538 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling
2024-12-03T13:06:18,539 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1733231168506.8e221e153e39a23226788dac72ada430.
2024-12-03T13:06:18,540 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C40603%2C1733231163292.1733231178540
2024-12-03T13:06:18,564 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231166173 with entries=4, filesize=947 B; new WAL /user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231178540
2024-12-03T13:06:18,565 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41431:41431),(127.0.0.1/127.0.0.1:34175:34175)]
2024-12-03T13:06:18,565 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231166173 is not closed yet, will try archiving it next time
2024-12-03T13:06:18,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741833_1009 (size=955)
2024-12-03T13:06:18,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741833_1009 (size=955)
2024-12-03T13:06:30,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40603 {}] regionserver.HRegion(8581): Flush requested on 8e221e153e39a23226788dac72ada430
2024-12-03T13:06:30,600 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8e221e153e39a23226788dac72ada430 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-03T13:06:30,661 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/.tmp/info/da3c6053d83144c99cd29a98f62b2692 is 1080, key is row0001/info:/1733231178576/Put/seqid=0
2024-12-03T13:06:30,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741840_1016 (size=12509)
2024-12-03T13:06:30,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741840_1016 (size=12509)
2024-12-03T13:06:30,673 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/.tmp/info/da3c6053d83144c99cd29a98f62b2692
2024-12-03T13:06:30,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/.tmp/info/da3c6053d83144c99cd29a98f62b2692 as hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/info/da3c6053d83144c99cd29a98f62b2692
2024-12-03T13:06:30,727 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/info/da3c6053d83144c99cd29a98f62b2692, entries=7, sequenceid=11, filesize=12.2 K
2024-12-03T13:06:30,730 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 8e221e153e39a23226788dac72ada430 in 130ms, sequenceid=11, compaction requested=false
2024-12-03T13:06:30,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8e221e153e39a23226788dac72ada430:
2024-12-03T13:06:31,267 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-03T13:06:38,611 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C40603%2C1733231163292.1733231198610
2024-12-03T13:06:38,822 INFO [Time-limited test {}] wal.AbstractFSWAL(1183): Slow sync cost: 208 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK], DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK]]
2024-12-03T13:06:38,824 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231178540 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231198610
2024-12-03T13:06:38,824 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34175:34175),(127.0.0.1/127.0.0.1:41431:41431)]
2024-12-03T13:06:38,824 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231178540 is not closed yet, will try archiving it next time
2024-12-03T13:06:38,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741839_1015 (size=12399)
2024-12-03T13:06:38,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741839_1015 (size=12399)
2024-12-03T13:06:39,028 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK], DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK]]
2024-12-03T13:06:39,813 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-03T13:06:39,815 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50278, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService
2024-12-03T13:06:41,231 INFO [sync.0 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK], DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK]]
2024-12-03T13:06:43,435 INFO [sync.1 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK], DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK]]
2024-12-03T13:06:45,639 INFO [sync.2 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK], DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK]]
2024-12-03T13:06:45,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40603 {}] regionserver.HRegion(8581): Flush requested on 8e221e153e39a23226788dac72ada430
2024-12-03T13:06:45,639 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8e221e153e39a23226788dac72ada430 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-03T13:06:45,841 INFO [sync.3 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK], DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK]]
2024-12-03T13:06:45,849 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/.tmp/info/42dc3d328de248f1a4c33304d4f5bf7e is 1080, key is row0008/info:/1733231192601/Put/seqid=0
2024-12-03T13:06:45,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741842_1018 (size=12509)
2024-12-03T13:06:45,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741842_1018 (size=12509)
2024-12-03T13:06:45,869 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/.tmp/info/42dc3d328de248f1a4c33304d4f5bf7e
2024-12-03T13:06:45,937 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/.tmp/info/42dc3d328de248f1a4c33304d4f5bf7e as hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/info/42dc3d328de248f1a4c33304d4f5bf7e
2024-12-03T13:06:45,957 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/info/42dc3d328de248f1a4c33304d4f5bf7e, entries=7, sequenceid=21, filesize=12.2 K
2024-12-03T13:06:46,159 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK], DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK]]
2024-12-03T13:06:46,160 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 8e221e153e39a23226788dac72ada430 in 520ms, sequenceid=21, compaction requested=false
2024-12-03T13:06:46,160 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8e221e153e39a23226788dac72ada430:
2024-12-03T13:06:46,160 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=24.4 K, sizeToCheck=16.0 K
2024-12-03T13:06:46,160 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-03T13:06:46,161 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/info/da3c6053d83144c99cd29a98f62b2692 because midkey is the same as first or last row
2024-12-03T13:06:47,842 INFO [sync.0 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK], DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK]]
2024-12-03T13:06:48,274 INFO [master/35946657df09:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-03T13:06:48,274 INFO [master/35946657df09:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-12-03T13:06:50,102 WARN [sync.1 {}] wal.AbstractFSWAL(1346): Requesting log roll because we exceeded slow sync threshold; count=7, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK], DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK]]
2024-12-03T13:06:50,103 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 35946657df09%2C40603%2C1733231163292:(num 1733231198610) roll requested
2024-12-03T13:06:50,103 INFO [sync.1 {}] wal.AbstractFSWAL(1183): Slow sync cost: 257 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK], DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK]]
2024-12-03T13:06:50,103 INFO [regionserver/35946657df09:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C40603%2C1733231163292.1733231210103
2024-12-03T13:06:50,311 INFO [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(1183): Slow sync cost: 204 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK], DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK]]
2024-12-03T13:06:50,511 INFO [sync.2 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK], DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK]]
2024-12-03T13:06:50,512 INFO [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231198610 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231210103
2024-12-03T13:06:50,512 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34175:34175),(127.0.0.1/127.0.0.1:41431:41431)]
2024-12-03T13:06:50,512 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231198610 is not closed yet, will try archiving it next time
2024-12-03T13:06:50,513 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231178540 to hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/oldWALs/35946657df09%2C40603%2C1733231163292.1733231178540
2024-12-03T13:06:50,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741841_1017 (size=7739)
2024-12-03T13:06:50,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741841_1017 (size=7739)
2024-12-03T13:06:52,305 INFO [sync.3 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK], DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK]]
2024-12-03T13:06:54,468 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 8e221e153e39a23226788dac72ada430, had cached 0 bytes from a total of 25018
2024-12-03T13:06:54,509 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK], DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK]]
2024-12-03T13:06:56,712 INFO [sync.0 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK], DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK]]
2024-12-03T13:06:58,915 INFO [sync.1 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK], DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK]]
2024-12-03T13:07:00,917 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1])
2024-12-03T13:07:00,917 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C40603%2C1733231163292.1733231220917
2024-12-03T13:07:01,267 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
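The WARN entries above and below record two distinct roll triggers in the WAL's sync accounting: a count threshold ("count=7, threshold=5" after a run of ~200 ms syncs) and a per-sync latency threshold ("time=5036 ms, threshold=5000 ms" once individual syncs take five seconds). The Java sketch below is a minimal illustration of that kind of accounting; the class and field names and the 100 ms "slow" cutoff are assumptions for illustration, not HBase's actual AbstractFSWAL internals.

// Minimal sketch of slow-sync accounting that can request a WAL roll.
// Hypothetical names; the count and latency thresholds mirror the values
// visible in this log (5 slow syncs, 5000 ms), the 100 ms cutoff is assumed.
public final class SlowSyncRollPolicy {
    private final int countThreshold;   // roll after this many slow syncs (5 above)
    private final long slowMillis;      // a sync at least this slow is counted (assumed 100 ms)
    private final long rollMillis;      // one sync this slow forces a roll (5000 ms above)
    private int slowCount;

    SlowSyncRollPolicy(int countThreshold, long slowMillis, long rollMillis) {
        this.countThreshold = countThreshold;
        this.slowMillis = slowMillis;
        this.rollMillis = rollMillis;
    }

    /** Called with the cost of each completed sync; true means "request a log roll". */
    synchronized boolean onSyncCompleted(long costMillis) {
        if (costMillis >= rollMillis) {
            slowCount = 0;
            return true;    // the "time=5036 ms, threshold=5000 ms" case below
        }
        if (costMillis >= slowMillis && ++slowCount > countThreshold) {
            slowCount = 0;
            return true;    // the "count=7, threshold=5" case above
        }
        return false;
    }

    public static void main(String[] args) {
        SlowSyncRollPolicy policy = new SlowSyncRollPolicy(5, 100, 5000);
        boolean roll = false;
        for (long cost : new long[] {208, 201, 201, 201, 201, 200, 200}) {
            roll |= policy.onSyncCompleted(cost);  // seven ~200 ms syncs, as logged
        }
        System.out.println("roll requested: " + roll);  // true once the count exceeds 5
    }
}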
2024-12-03T13:07:05,959 INFO [Time-limited test {}] wal.AbstractFSWAL(1183): Slow sync cost: 5036 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK], DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK]]
2024-12-03T13:07:05,959 WARN [Time-limited test {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5036 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK], DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK]]
2024-12-03T13:07:05,962 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 35946657df09%2C40603%2C1733231163292:(num 1733231220917) roll requested
2024-12-03T13:07:06,829 DEBUG [master/35946657df09:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region cbc38bc35014cb0cf824dc8d928df3b8 changed from -1.0 to 0.0, refreshing cache
2024-12-03T13:07:10,960 INFO [sync.2 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK], DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK]]
2024-12-03T13:07:10,960 WARN [sync.2 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK], DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK]]
2024-12-03T13:07:10,961 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231210103 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231220917
2024-12-03T13:07:10,961 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41431:41431),(127.0.0.1/127.0.0.1:34175:34175)]
2024-12-03T13:07:10,961 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231210103 is not closed yet, will try archiving it next time
2024-12-03T13:07:10,961 INFO [regionserver/35946657df09:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C40603%2C1733231163292.1733231230961
2024-12-03T13:07:10,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741843_1019 (size=4753)
2024-12-03T13:07:10,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741843_1019 (size=4753)
2024-12-03T13:07:15,964 INFO [sync.3 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK], DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK]]
2024-12-03T13:07:15,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40603 {}] regionserver.HRegion(8581): Flush requested on 8e221e153e39a23226788dac72ada430
2024-12-03T13:07:15,964 WARN [sync.3 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK], DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK]]
2024-12-03T13:07:15,964 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8e221e153e39a23226788dac72ada430 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-03T13:07:15,983 INFO [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(1183): Slow sync cost: 5019 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK], DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK]]
2024-12-03T13:07:15,983 WARN [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5019 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK], DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK]]
2024-12-03T13:07:17,965 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1])
2024-12-03T13:07:20,966 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK], DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK]]
2024-12-03T13:07:20,966 WARN [sync.4 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK], DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK]]
2024-12-03T13:07:20,971 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/.tmp/info/143160a74040475e82987752d480b6ef is 1080, key is row0015/info:/1733231207641/Put/seqid=0
2024-12-03T13:07:20,987 INFO [sync.0 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5002 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK], DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK]]
2024-12-03T13:07:20,987 WARN [sync.0 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK], DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK]]
2024-12-03T13:07:20,989 INFO [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231220917 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231230961
2024-12-03T13:07:20,990 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41431:41431),(127.0.0.1/127.0.0.1:34175:34175)]
2024-12-03T13:07:20,990 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231220917 is not closed yet, will try archiving it next time
2024-12-03T13:07:20,990 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 35946657df09%2C40603%2C1733231163292:(num 1733231230961) roll requested
2024-12-03T13:07:20,990 INFO [regionserver/35946657df09:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C40603%2C1733231163292.1733231240990
2024-12-03T13:07:20,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741844_1020 (size=1569)
2024-12-03T13:07:20,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741844_1020 (size=1569)
2024-12-03T13:07:21,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741846_1022 (size=12509)
2024-12-03T13:07:21,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741846_1022 (size=12509)
2024-12-03T13:07:21,404 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/.tmp/info/143160a74040475e82987752d480b6ef
2024-12-03T13:07:21,427 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/.tmp/info/143160a74040475e82987752d480b6ef as hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/info/143160a74040475e82987752d480b6ef
2024-12-03T13:07:21,454 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/info/143160a74040475e82987752d480b6ef, entries=7, sequenceid=31, filesize=12.2 K
2024-12-03T13:07:26,012 INFO [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(1183): Slow sync cost: 5011 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK], DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK]]
2024-12-03T13:07:26,012 WARN [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5011 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK], DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK]]
2024-12-03T13:07:26,457 INFO [sync.1 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK], DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK]]
2024-12-03T13:07:26,457 WARN [sync.1 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK], DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK]]
2024-12-03T13:07:26,457 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 8e221e153e39a23226788dac72ada430 in 10493ms, sequenceid=31, compaction requested=true
2024-12-03T13:07:26,457 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8e221e153e39a23226788dac72ada430:
2024-12-03T13:07:26,457 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=36.6 K, sizeToCheck=16.0 K
2024-12-03T13:07:26,457 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-03T13:07:26,458 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/info/da3c6053d83144c99cd29a98f62b2692 because midkey is the same as first or last row
2024-12-03T13:07:26,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e221e153e39a23226788dac72ada430:info, priority=-2147483648, current under compaction store size is 1
2024-12-03T13:07:26,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-03T13:07:26,459 DEBUG [RS:0;35946657df09:40603-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-03T13:07:26,464 DEBUG [RS:0;35946657df09:40603-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-03T13:07:26,465 DEBUG [RS:0;35946657df09:40603-shortCompactions-0 {}] regionserver.HStore(1540): 8e221e153e39a23226788dac72ada430/info is initiating minor compaction (all files)
2024-12-03T13:07:26,466 INFO [RS:0;35946657df09:40603-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e221e153e39a23226788dac72ada430/info in TestLogRolling-testSlowSyncLogRolling,,1733231168506.8e221e153e39a23226788dac72ada430.
2024-12-03T13:07:26,466 INFO [RS:0;35946657df09:40603-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/info/da3c6053d83144c99cd29a98f62b2692, hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/info/42dc3d328de248f1a4c33304d4f5bf7e, hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/info/143160a74040475e82987752d480b6ef] into tmpdir=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/.tmp, totalSize=36.6 K
2024-12-03T13:07:26,468 DEBUG [RS:0;35946657df09:40603-shortCompactions-0 {}] compactions.Compactor(224): Compacting da3c6053d83144c99cd29a98f62b2692, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733231178576
2024-12-03T13:07:26,469 DEBUG [RS:0;35946657df09:40603-shortCompactions-0 {}] compactions.Compactor(224): Compacting 42dc3d328de248f1a4c33304d4f5bf7e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1733231192601
2024-12-03T13:07:26,470 DEBUG [RS:0;35946657df09:40603-shortCompactions-0 {}] compactions.Compactor(224): Compacting 143160a74040475e82987752d480b6ef, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1733231207641
2024-12-03T13:07:26,511 INFO [RS:0;35946657df09:40603-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e221e153e39a23226788dac72ada430#info#compaction#3 average throughput is 7.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-03T13:07:26,514 DEBUG [RS:0;35946657df09:40603-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/.tmp/info/490bbdd14bd24156aeda52e4142a3a7c is 1080, key is row0001/info:/1733231178576/Put/seqid=0
2024-12-03T13:07:26,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741848_1024 (size=27710)
2024-12-03T13:07:26,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741848_1024 (size=27710)
2024-12-03T13:07:26,972 DEBUG [RS:0;35946657df09:40603-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/.tmp/info/490bbdd14bd24156aeda52e4142a3a7c as hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/info/490bbdd14bd24156aeda52e4142a3a7c
2024-12-03T13:07:31,012 INFO [sync.2 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK], DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK]]
2024-12-03T13:07:31,012 WARN [sync.2 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK], DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK]]
2024-12-03T13:07:31,013 INFO [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231230961 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231240990
2024-12-03T13:07:31,013 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34175:34175),(127.0.0.1/127.0.0.1:41431:41431)]
2024-12-03T13:07:31,013 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231230961 is not closed yet, will try archiving it next time
2024-12-03T13:07:31,014 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231198610 to hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/oldWALs/35946657df09%2C40603%2C1733231163292.1733231198610
2024-12-03T13:07:31,014 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 35946657df09%2C40603%2C1733231163292:(num 1733231240990) roll requested
2024-12-03T13:07:31,014 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C40603%2C1733231163292.1733231251014
2024-12-03T13:07:31,017 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231210103 to hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/oldWALs/35946657df09%2C40603%2C1733231163292.1733231210103
2024-12-03T13:07:31,019 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231220917 to hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/oldWALs/35946657df09%2C40603%2C1733231163292.1733231220917
2024-12-03T13:07:31,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741845_1021 (size=438)
2024-12-03T13:07:31,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741845_1021 (size=438)
2024-12-03T13:07:31,027 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231230961 to hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/oldWALs/35946657df09%2C40603%2C1733231163292.1733231230961
2024-12-03T13:07:31,267 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-03T13:07:36,014 INFO [sync.3 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK], DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK]]
2024-12-03T13:07:36,014 WARN [sync.3 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK], DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK]]
2024-12-03T13:07:36,017 INFO [RS:0;35946657df09:40603-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e221e153e39a23226788dac72ada430/info of 8e221e153e39a23226788dac72ada430 into 490bbdd14bd24156aeda52e4142a3a7c(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 9sec to execute.
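The compaction that just completed was selected earlier by the exploring policy ("Exploring compaction algorithm has selected 3 files of size 37527"): three similarly sized 12.2 K flush files merged into one 27.1 K file. The Java sketch below shows the general shape of ratio-based selection over store-file sizes; the suffix-scan heuristic, the names, and the 1.2 ratio are simplifications assumed for illustration, not HBase's actual ExploringCompactionPolicy.

// Illustrative ratio-based compaction selection over store-file sizes.
// A file qualifies when it is no larger than `ratio` times the combined
// size of the other candidates, so runs of similarly sized files compact.
import java.util.ArrayList;
import java.util.List;

public final class CompactionSelect {
    static List<Long> select(List<Long> fileSizes, double ratio, int minFiles) {
        // Scan suffix runs of the (age-ordered) file list, newest last.
        for (int start = 0; start + minFiles <= fileSizes.size(); start++) {
            List<Long> candidate = fileSizes.subList(start, fileSizes.size());
            long total = candidate.stream().mapToLong(Long::longValue).sum();
            boolean allInRatio = candidate.stream()
                .allMatch(s -> s <= ratio * (total - s));
            if (allInRatio) {
                return new ArrayList<>(candidate);
            }
        }
        return List.of();  // nothing eligible
    }

    public static void main(String[] args) {
        // Three ~12.2 K store files, as logged (12509 * 3 = 37527 bytes total).
        System.out.println(select(List.of(12509L, 12509L, 12509L), 1.2, 3));
    }
}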
2024-12-03T13:07:36,017 DEBUG [RS:0;35946657df09:40603-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e221e153e39a23226788dac72ada430:
2024-12-03T13:07:36,017 INFO [RS:0;35946657df09:40603-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1733231168506.8e221e153e39a23226788dac72ada430., storeName=8e221e153e39a23226788dac72ada430/info, priority=13, startTime=1733231246459; duration=9sec
2024-12-03T13:07:36,018 DEBUG [RS:0;35946657df09:40603-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=27.1 K, sizeToCheck=16.0 K
2024-12-03T13:07:36,018 DEBUG [RS:0;35946657df09:40603-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-03T13:07:36,019 DEBUG [RS:0;35946657df09:40603-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/info/490bbdd14bd24156aeda52e4142a3a7c because midkey is the same as first or last row
2024-12-03T13:07:36,019 DEBUG [RS:0;35946657df09:40603-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-03T13:07:36,019 DEBUG [RS:0;35946657df09:40603-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e221e153e39a23226788dac72ada430:info
2024-12-03T13:07:36,058 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK], DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK]]
2024-12-03T13:07:36,058 WARN [sync.4 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43439,DS-6473a138-8ded-4e00-a9ff-915b1763b253,DISK], DatanodeInfoWithStorage[127.0.0.1:45207,DS-584a9991-ef4e-4020-be2d-f5c069507376,DISK]]
2024-12-03T13:07:36,059 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231240990 with entries=1, filesize=531 B; new WAL /user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231251014
2024-12-03T13:07:36,059 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34175:34175),(127.0.0.1/127.0.0.1:41431:41431)]
2024-12-03T13:07:36,059 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231240990 is not closed yet, will try archiving it next time
2024-12-03T13:07:36,060 INFO [regionserver/35946657df09:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C40603%2C1733231163292.1733231256059
2024-12-03T13:07:36,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741847_1023 (size=539)
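The split-policy entries above show the two checks run after each flush or compaction: a size check ("Should split because info size=27.1 K, sizeToCheck=16.0 K") and a midkey guard ("cannot split ... because midkey is the same as first or last row"), which rejects a split that would leave one daughter region empty. A minimal Java sketch of those two checks follows; the class and method names are hypothetical simplifications, not HBase's real policy classes, and the row keys in main are taken from keys visible in this log.

// Illustrative split checks: size threshold plus midkey guard.
import java.util.Arrays;

public final class SplitCheck {
    /** Size check: a store larger than sizeToCheck is a split candidate. */
    static boolean shouldSplit(long storeSizeBytes, long sizeToCheckBytes) {
        return storeSizeBytes > sizeToCheckBytes;
    }

    /** Midkey guard: splitting at a midkey equal to the first or last row
     *  would produce an empty daughter region, so such a split is rejected. */
    static boolean canSplitAt(byte[] midkey, byte[] firstRow, byte[] lastRow) {
        return !Arrays.equals(midkey, firstRow) && !Arrays.equals(midkey, lastRow);
    }

    public static void main(String[] args) {
        long storeSize = 27710;        // ~27.1 K, the compacted file size above
        long sizeToCheck = 16384;      // 16.0 K
        byte[] first = "row0001".getBytes();
        byte[] last = "row0029".getBytes();
        byte[] mid = "row0001".getBytes();  // midkey equals first row: cannot split
        System.out.println("should split: " + shouldSplit(storeSize, sizeToCheck));
        System.out.println("can split at midkey: " + canSplitAt(mid, first, last));
    }
}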
2024-12-03T13:07:36,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741847_1023 (size=539)
2024-12-03T13:07:36,077 INFO [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231251014 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231256059
2024-12-03T13:07:36,078 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34175:34175),(127.0.0.1/127.0.0.1:41431:41431)]
2024-12-03T13:07:36,078 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231240990 is not closed yet, will try archiving it next time
2024-12-03T13:07:36,078 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231251014 is not closed yet, will try archiving it next time
2024-12-03T13:07:36,078 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 35946657df09%2C40603%2C1733231163292:(num 1733231256059) roll requested
2024-12-03T13:07:36,078 INFO [regionserver/35946657df09:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C40603%2C1733231163292.1733231256078
2024-12-03T13:07:36,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741849_1025 (size=1258)
2024-12-03T13:07:36,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741849_1025 (size=1258)
2024-12-03T13:07:36,094 INFO [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231256059 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231256078
2024-12-03T13:07:36,094 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41431:41431),(127.0.0.1/127.0.0.1:34175:34175)]
2024-12-03T13:07:36,095 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231240990 is not closed yet, will try archiving it next time
2024-12-03T13:07:36,095 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231251014 is not closed yet, will try archiving it next time
2024-12-03T13:07:36,095 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231256059 is not closed yet, will try archiving it next time
2024-12-03T13:07:36,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741850_1026 (size=93)
2024-12-03T13:07:36,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741850_1026 (size=93)
2024-12-03T13:07:36,463 DEBUG [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(751): hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231251014 is not closed yet, will try archiving it next time
2024-12-03T13:07:36,463 DEBUG [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(751): hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231256059 is not closed yet, will try archiving it next time
2024-12-03T13:07:36,463 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231240990 to hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/oldWALs/35946657df09%2C40603%2C1733231163292.1733231240990
2024-12-03T13:07:36,484 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(751): hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231256059 is not closed yet, will try archiving it next time
2024-12-03T13:07:36,503 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292/35946657df09%2C40603%2C1733231163292.1733231256059 to hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/oldWALs/35946657df09%2C40603%2C1733231163292.1733231256059
2024-12-03T13:07:39,469 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 8e221e153e39a23226788dac72ada430, had cached 0 bytes from a total of 27710
2024-12-03T13:07:48,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40603 {}] regionserver.HRegion(8581): Flush requested on 8e221e153e39a23226788dac72ada430
2024-12-03T13:07:48,082 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8e221e153e39a23226788dac72ada430 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-03T13:07:48,093 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/.tmp/info/6c932b6260ce43cfa6cb26777c9129c8 is 1080, key is row0022/info:/1733231256061/Put/seqid=0
2024-12-03T13:07:48,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741852_1028 (size=12509)
2024-12-03T13:07:48,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741852_1028 (size=12509)
2024-12-03T13:07:48,528 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/.tmp/info/6c932b6260ce43cfa6cb26777c9129c8
2024-12-03T13:07:48,543 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/.tmp/info/6c932b6260ce43cfa6cb26777c9129c8 as hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/info/6c932b6260ce43cfa6cb26777c9129c8
2024-12-03T13:07:48,557 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/info/6c932b6260ce43cfa6cb26777c9129c8, entries=7, sequenceid=42, filesize=12.2 K
2024-12-03T13:07:48,558 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 8e221e153e39a23226788dac72ada430 in 476ms, sequenceid=42, compaction requested=false
2024-12-03T13:07:48,559 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8e221e153e39a23226788dac72ada430:
2024-12-03T13:07:48,559 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=39.3 K, sizeToCheck=16.0 K
2024-12-03T13:07:48,559 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-03T13:07:48,559 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/info/490bbdd14bd24156aeda52e4142a3a7c because midkey is the same as first or last row
2024-12-03T13:07:56,092 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster
2024-12-03T13:07:56,092 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-03T13:07:56,092 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x260e636b to 127.0.0.1:58576
2024-12-03T13:07:56,092 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-03T13:07:56,093 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-12-03T13:07:56,093 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1741889779, stopped=false
2024-12-03T13:07:56,093 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=35946657df09,38233,1733231162231
2024-12-03T13:07:56,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40603-0x1019c9652780001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-03T13:07:56,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-03T13:07:56,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40603-0x1019c9652780001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:07:56,102 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping
2024-12-03T13:07:56,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:07:56,103 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-03T13:07:56,103 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '35946657df09,40603,1733231163292' *****
2024-12-03T13:07:56,103 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested
2024-12-03T13:07:56,103 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40603-0x1019c9652780001, quorum=127.0.0.1:58576, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-03T13:07:56,103 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-03T13:07:56,103 INFO [RS:0;35946657df09:40603 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-03T13:07:56,104 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting
2024-12-03T13:07:56,104 INFO [RS:0;35946657df09:40603 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-03T13:07:56,104 INFO [RS:0;35946657df09:40603 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-03T13:07:56,104 INFO [RS:0;35946657df09:40603 {}] regionserver.HRegionServer(3579): Received CLOSE for 8e221e153e39a23226788dac72ada430
2024-12-03T13:07:56,104 INFO [RS:0;35946657df09:40603 {}] regionserver.HRegionServer(3579): Received CLOSE for cbc38bc35014cb0cf824dc8d928df3b8
2024-12-03T13:07:56,105 INFO [RS:0;35946657df09:40603 {}] regionserver.HRegionServer(1224): stopping server 35946657df09,40603,1733231163292
2024-12-03T13:07:56,105 DEBUG [RS:0;35946657df09:40603 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-03T13:07:56,105 INFO [RS:0;35946657df09:40603 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-03T13:07:56,105 INFO [RS:0;35946657df09:40603 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-03T13:07:56,105 INFO [RS:0;35946657df09:40603 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-03T13:07:56,105 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 8e221e153e39a23226788dac72ada430, disabling compactions & flushes
2024-12-03T13:07:56,105 INFO [RS:0;35946657df09:40603 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740
2024-12-03T13:07:56,105 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testSlowSyncLogRolling,,1733231168506.8e221e153e39a23226788dac72ada430.
2024-12-03T13:07:56,105 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testSlowSyncLogRolling,,1733231168506.8e221e153e39a23226788dac72ada430.
2024-12-03T13:07:56,105 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733231168506.8e221e153e39a23226788dac72ada430. after waiting 0 ms
2024-12-03T13:07:56,105 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733231168506.8e221e153e39a23226788dac72ada430.
2024-12-03T13:07:56,105 INFO [RS:0;35946657df09:40603 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close
2024-12-03T13:07:56,105 DEBUG [RS:0;35946657df09:40603 {}] regionserver.HRegionServer(1603): Online Regions={8e221e153e39a23226788dac72ada430=TestLogRolling-testSlowSyncLogRolling,,1733231168506.8e221e153e39a23226788dac72ada430., 1588230740=hbase:meta,,1.1588230740, cbc38bc35014cb0cf824dc8d928df3b8=hbase:namespace,,1733231166830.cbc38bc35014cb0cf824dc8d928df3b8.}
2024-12-03T13:07:56,105 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-03T13:07:56,105 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-03T13:07:56,105 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 8e221e153e39a23226788dac72ada430 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB
2024-12-03T13:07:56,105 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-03T13:07:56,105 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-03T13:07:56,106 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-03T13:07:56,106 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.81 KB heapSize=5.32 KB
2024-12-03T13:07:56,106 DEBUG [RS:0;35946657df09:40603 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 8e221e153e39a23226788dac72ada430, cbc38bc35014cb0cf824dc8d928df3b8
2024-12-03T13:07:56,111 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/.tmp/info/9a6aa665295c4e3ab86c5a85699f9d19 is 1080, key is row0029/info:/1733231270084/Put/seqid=0
2024-12-03T13:07:56,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741853_1029 (size=8193)
2024-12-03T13:07:56,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741853_1029 (size=8193)
2024-12-03T13:07:56,123 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/.tmp/info/9a6aa665295c4e3ab86c5a85699f9d19
2024-12-03T13:07:56,137 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/hbase/meta/1588230740/.tmp/info/b5b5a51a0e8c4458941d0f63330d5254 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1733231168506.8e221e153e39a23226788dac72ada430./info:regioninfo/1733231169597/Put/seqid=0
2024-12-03T13:07:56,142 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/.tmp/info/9a6aa665295c4e3ab86c5a85699f9d19 as hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/info/9a6aa665295c4e3ab86c5a85699f9d19
2024-12-03T13:07:56,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741854_1030 (size=8172)
2024-12-03T13:07:56,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741854_1030 (size=8172)
2024-12-03T13:07:56,152 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.59 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/hbase/meta/1588230740/.tmp/info/b5b5a51a0e8c4458941d0f63330d5254
2024-12-03T13:07:56,160 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/info/9a6aa665295c4e3ab86c5a85699f9d19, entries=3, sequenceid=48, filesize=8.0 K
2024-12-03T13:07:56,162 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 8e221e153e39a23226788dac72ada430 in 56ms, sequenceid=48, compaction requested=true
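Each flush recorded in this log follows the same three steps: write the memstore snapshot to a file under the store's .tmp directory, commit it into the store directory, then register it ("Added ..., entries=..., sequenceid=..., filesize=..."). The sketch below illustrates that tmp-then-commit pattern using java.nio.file as a local stand-in; HBase performs these steps against HDFS through its own FileSystem API, so paths and names here are illustrative assumptions only.

// Illustrative tmp-then-commit flush: readers never see a partial store file.
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public final class FlushCommit {
    static Path flush(byte[] snapshot, Path storeDir) throws IOException {
        Path tmpDir = storeDir.resolve(".tmp");
        Files.createDirectories(tmpDir);
        // 1. Flush the memstore snapshot to a uniquely named temp file.
        Path tmpFile = Files.createTempFile(tmpDir, "hfile-", "");
        Files.write(tmpFile, snapshot);
        // 2. Commit: atomically move the finished file into the store directory.
        Path committed = storeDir.resolve(tmpFile.getFileName());
        Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);
        // 3. The committed file can now be added to the store's file list,
        //    which is what the "Added ..." entries above record.
        return committed;
    }

    public static void main(String[] args) throws IOException {
        Path store = Files.createTempDirectory("info-store");
        System.out.println("committed: " + flush("row0001/info".getBytes(), store));
    }
}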
2024-12-03T13:07:56,162 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733231168506.8e221e153e39a23226788dac72ada430.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/info/da3c6053d83144c99cd29a98f62b2692, hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/info/42dc3d328de248f1a4c33304d4f5bf7e, hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/info/143160a74040475e82987752d480b6ef] to archive 2024-12-03T13:07:56,166 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733231168506.8e221e153e39a23226788dac72ada430.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-03T13:07:56,169 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733231168506.8e221e153e39a23226788dac72ada430.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/info/da3c6053d83144c99cd29a98f62b2692 to hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/archive/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/info/da3c6053d83144c99cd29a98f62b2692 2024-12-03T13:07:56,172 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733231168506.8e221e153e39a23226788dac72ada430.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/info/42dc3d328de248f1a4c33304d4f5bf7e to hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/archive/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/info/42dc3d328de248f1a4c33304d4f5bf7e 2024-12-03T13:07:56,175 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733231168506.8e221e153e39a23226788dac72ada430.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/info/143160a74040475e82987752d480b6ef to hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/archive/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/info/143160a74040475e82987752d480b6ef 2024-12-03T13:07:56,187 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/hbase/meta/1588230740/.tmp/table/cd9dff7abe00457fb0e578550e535df6 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1733231169625/Put/seqid=0 2024-12-03T13:07:56,194 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/default/TestLogRolling-testSlowSyncLogRolling/8e221e153e39a23226788dac72ada430/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1
2024-12-03T13:07:56,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741855_1031 (size=5452) 2024-12-03T13:07:56,197 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testSlowSyncLogRolling,,1733231168506.8e221e153e39a23226788dac72ada430. 2024-12-03T13:07:56,197 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 8e221e153e39a23226788dac72ada430: 2024-12-03T13:07:56,197 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1733231168506.8e221e153e39a23226788dac72ada430. 2024-12-03T13:07:56,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741855_1031 (size=5452) 2024-12-03T13:07:56,198 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing cbc38bc35014cb0cf824dc8d928df3b8, disabling compactions & flushes 2024-12-03T13:07:56,198 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733231166830.cbc38bc35014cb0cf824dc8d928df3b8. 2024-12-03T13:07:56,198 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733231166830.cbc38bc35014cb0cf824dc8d928df3b8. 2024-12-03T13:07:56,198 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733231166830.cbc38bc35014cb0cf824dc8d928df3b8. after waiting 0 ms 2024-12-03T13:07:56,198 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733231166830.cbc38bc35014cb0cf824dc8d928df3b8.
2024-12-03T13:07:56,198 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing cbc38bc35014cb0cf824dc8d928df3b8 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-03T13:07:56,198 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=232 B at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/hbase/meta/1588230740/.tmp/table/cd9dff7abe00457fb0e578550e535df6 2024-12-03T13:07:56,209 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/hbase/meta/1588230740/.tmp/info/b5b5a51a0e8c4458941d0f63330d5254 as hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/hbase/meta/1588230740/info/b5b5a51a0e8c4458941d0f63330d5254 2024-12-03T13:07:56,218 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/hbase/meta/1588230740/info/b5b5a51a0e8c4458941d0f63330d5254, entries=20, sequenceid=14, filesize=8.0 K 2024-12-03T13:07:56,220 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/hbase/meta/1588230740/.tmp/table/cd9dff7abe00457fb0e578550e535df6 as hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/hbase/meta/1588230740/table/cd9dff7abe00457fb0e578550e535df6 2024-12-03T13:07:56,223 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/hbase/namespace/cbc38bc35014cb0cf824dc8d928df3b8/.tmp/info/7f9d616ec8534a6099a0d97a28f3dab6 is 45, key is default/info:d/1733231168053/Put/seqid=0 2024-12-03T13:07:56,229 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/hbase/meta/1588230740/table/cd9dff7abe00457fb0e578550e535df6, entries=4, sequenceid=14, filesize=5.3 K 2024-12-03T13:07:56,231 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~2.81 KB/2882, heapSize ~5.04 KB/5160, currentSize=0 B/0 for 1588230740 in 125ms, sequenceid=14, compaction requested=false 2024-12-03T13:07:56,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741856_1032 (size=5037) 2024-12-03T13:07:56,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741856_1032 (size=5037) 2024-12-03T13:07:56,239 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/hbase/meta/1588230740/recovered.edits/17.seqid, newMaxSeqId=17, maxSeqId=1 
2024-12-03T13:07:56,240 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T13:07:56,240 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-03T13:07:56,241 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-03T13:07:56,241 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-03T13:07:56,306 DEBUG [RS:0;35946657df09:40603 {}] regionserver.HRegionServer(1629): Waiting on cbc38bc35014cb0cf824dc8d928df3b8 2024-12-03T13:07:56,507 DEBUG [RS:0;35946657df09:40603 {}] regionserver.HRegionServer(1629): Waiting on cbc38bc35014cb0cf824dc8d928df3b8 2024-12-03T13:07:56,636 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/hbase/namespace/cbc38bc35014cb0cf824dc8d928df3b8/.tmp/info/7f9d616ec8534a6099a0d97a28f3dab6 2024-12-03T13:07:56,647 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/hbase/namespace/cbc38bc35014cb0cf824dc8d928df3b8/.tmp/info/7f9d616ec8534a6099a0d97a28f3dab6 as hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/hbase/namespace/cbc38bc35014cb0cf824dc8d928df3b8/info/7f9d616ec8534a6099a0d97a28f3dab6 2024-12-03T13:07:56,660 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/hbase/namespace/cbc38bc35014cb0cf824dc8d928df3b8/info/7f9d616ec8534a6099a0d97a28f3dab6, entries=2, sequenceid=6, filesize=4.9 K 2024-12-03T13:07:56,662 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for cbc38bc35014cb0cf824dc8d928df3b8 in 464ms, sequenceid=6, compaction requested=false 2024-12-03T13:07:56,675 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/data/hbase/namespace/cbc38bc35014cb0cf824dc8d928df3b8/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-03T13:07:56,676 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733231166830.cbc38bc35014cb0cf824dc8d928df3b8. 2024-12-03T13:07:56,677 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for cbc38bc35014cb0cf824dc8d928df3b8: 2024-12-03T13:07:56,677 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733231166830.cbc38bc35014cb0cf824dc8d928df3b8. 
2024-12-03T13:07:56,707 INFO [RS:0;35946657df09:40603 {}] regionserver.HRegionServer(1250): stopping server 35946657df09,40603,1733231163292; all regions closed. 2024-12-03T13:07:56,708 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292 2024-12-03T13:07:56,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741834_1010 (size=4330) 2024-12-03T13:07:56,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741834_1010 (size=4330) 2024-12-03T13:07:57,117 INFO [regionserver/35946657df09:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-03T13:07:57,117 INFO [regionserver/35946657df09:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-03T13:07:57,119 DEBUG [RS:0;35946657df09:40603 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/oldWALs 2024-12-03T13:07:57,119 INFO [RS:0;35946657df09:40603 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 35946657df09%2C40603%2C1733231163292.meta:.meta(num 1733231166528) 2024-12-03T13:07:57,119 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/WALs/35946657df09,40603,1733231163292 2024-12-03T13:07:57,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741851_1027 (size=13066) 2024-12-03T13:07:57,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741851_1027 (size=13066) 2024-12-03T13:07:57,132 DEBUG [RS:0;35946657df09:40603 {}] wal.AbstractFSWAL(1071): Moved 3 WAL file(s) to /user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/oldWALs 2024-12-03T13:07:57,132 INFO [RS:0;35946657df09:40603 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 35946657df09%2C40603%2C1733231163292:(num 1733231256078) 2024-12-03T13:07:57,132 DEBUG [RS:0;35946657df09:40603 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T13:07:57,132 INFO [RS:0;35946657df09:40603 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T13:07:57,132 INFO [RS:0;35946657df09:40603 {}] hbase.ChoreService(370): Chore service for: regionserver/35946657df09:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-03T13:07:57,133 INFO [regionserver/35946657df09:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-03T13:07:57,133 INFO [RS:0;35946657df09:40603 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:40603 2024-12-03T13:07:57,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40603-0x1019c9652780001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/35946657df09,40603,1733231163292 2024-12-03T13:07:57,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T13:07:57,145 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [35946657df09,40603,1733231163292] 2024-12-03T13:07:57,145 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 35946657df09,40603,1733231163292; numProcessing=1 2024-12-03T13:07:57,177 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/35946657df09,40603,1733231163292 already deleted, retry=false 2024-12-03T13:07:57,177 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 35946657df09,40603,1733231163292 expired; onlineServers=0 2024-12-03T13:07:57,177 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '35946657df09,38233,1733231162231' ***** 2024-12-03T13:07:57,177 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-03T13:07:57,178 DEBUG [M:0;35946657df09:38233 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a99cf3d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=35946657df09/172.17.0.2:0 2024-12-03T13:07:57,178 INFO [M:0;35946657df09:38233 {}] regionserver.HRegionServer(1224): stopping server 35946657df09,38233,1733231162231 2024-12-03T13:07:57,178 INFO [M:0;35946657df09:38233 {}] regionserver.HRegionServer(1250): stopping server 35946657df09,38233,1733231162231; all regions closed. 2024-12-03T13:07:57,178 DEBUG [M:0;35946657df09:38233 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T13:07:57,178 DEBUG [M:0;35946657df09:38233 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-03T13:07:57,178 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-03T13:07:57,178 DEBUG [M:0;35946657df09:38233 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-03T13:07:57,178 DEBUG [master/35946657df09:0:becomeActiveMaster-HFileCleaner.small.0-1733231165808 {}] cleaner.HFileCleaner(306): Exit Thread[master/35946657df09:0:becomeActiveMaster-HFileCleaner.small.0-1733231165808,5,FailOnTimeoutGroup] 2024-12-03T13:07:57,178 DEBUG [master/35946657df09:0:becomeActiveMaster-HFileCleaner.large.0-1733231165806 {}] cleaner.HFileCleaner(306): Exit Thread[master/35946657df09:0:becomeActiveMaster-HFileCleaner.large.0-1733231165806,5,FailOnTimeoutGroup] 2024-12-03T13:07:57,178 INFO [M:0;35946657df09:38233 {}] hbase.ChoreService(370): Chore service for: master/35946657df09:0 had [] on shutdown 2024-12-03T13:07:57,178 DEBUG [M:0;35946657df09:38233 {}] master.HMaster(1733): Stopping service threads 2024-12-03T13:07:57,178 INFO [M:0;35946657df09:38233 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-03T13:07:57,179 INFO [M:0;35946657df09:38233 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-03T13:07:57,179 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-03T13:07:57,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-03T13:07:57,194 DEBUG [M:0;35946657df09:38233 {}] zookeeper.ZKUtil(347): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-03T13:07:57,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T13:07:57,194 WARN [M:0;35946657df09:38233 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-03T13:07:57,194 INFO [M:0;35946657df09:38233 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-03T13:07:57,194 INFO [M:0;35946657df09:38233 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-03T13:07:57,195 DEBUG [M:0;35946657df09:38233 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T13:07:57,195 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T13:07:57,195 INFO [M:0;35946657df09:38233 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T13:07:57,195 DEBUG [M:0;35946657df09:38233 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T13:07:57,195 DEBUG [M:0;35946657df09:38233 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-03T13:07:57,195 DEBUG [M:0;35946657df09:38233 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T13:07:57,195 INFO [M:0;35946657df09:38233 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=40.21 KB heapSize=50.14 KB 2024-12-03T13:07:57,220 DEBUG [M:0;35946657df09:38233 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1119f5f590da4b7ca6d8372bcb0ac687 is 82, key is hbase:meta,,1/info:regioninfo/1733231166634/Put/seqid=0 2024-12-03T13:07:57,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741857_1033 (size=5672) 2024-12-03T13:07:57,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741857_1033 (size=5672) 2024-12-03T13:07:57,233 INFO [M:0;35946657df09:38233 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1119f5f590da4b7ca6d8372bcb0ac687 2024-12-03T13:07:57,258 DEBUG [M:0;35946657df09:38233 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/301190ca029b42308403606821474126 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1733231169661/Put/seqid=0 2024-12-03T13:07:57,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40603-0x1019c9652780001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T13:07:57,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40603-0x1019c9652780001, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T13:07:57,261 INFO [RS:0;35946657df09:40603 {}] regionserver.HRegionServer(1307): Exiting; stopping=35946657df09,40603,1733231163292; zookeeper connection closed.
2024-12-03T13:07:57,261 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5209164a {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5209164a 2024-12-03T13:07:57,262 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-03T13:07:57,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741858_1034 (size=6426) 2024-12-03T13:07:57,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741858_1034 (size=6426) 2024-12-03T13:07:57,273 INFO [M:0;35946657df09:38233 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=39.61 KB at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/301190ca029b42308403606821474126 2024-12-03T13:07:57,283 INFO [M:0;35946657df09:38233 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 301190ca029b42308403606821474126 2024-12-03T13:07:57,299 DEBUG [M:0;35946657df09:38233 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/69f828834dbb4b2093043ffad3198e17 is 69, key is 35946657df09,40603,1733231163292/rs:state/1733231165913/Put/seqid=0 2024-12-03T13:07:57,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741859_1035 (size=5156) 2024-12-03T13:07:57,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741859_1035 (size=5156) 2024-12-03T13:07:57,314 INFO [M:0;35946657df09:38233 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/69f828834dbb4b2093043ffad3198e17 2024-12-03T13:07:57,347 DEBUG [M:0;35946657df09:38233 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a556fca1d7564777a5d132c2963e221e is 52, key is load_balancer_on/state:d/1733231168451/Put/seqid=0 2024-12-03T13:07:57,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741860_1036 (size=5056) 2024-12-03T13:07:57,362 INFO [M:0;35946657df09:38233 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a556fca1d7564777a5d132c2963e221e 2024-12-03T13:07:57,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741860_1036 (size=5056) 2024-12-03T13:07:57,372 DEBUG [M:0;35946657df09:38233 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1119f5f590da4b7ca6d8372bcb0ac687 as hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1119f5f590da4b7ca6d8372bcb0ac687
2024-12-03T13:07:57,380 INFO [M:0;35946657df09:38233 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1119f5f590da4b7ca6d8372bcb0ac687, entries=8, sequenceid=104, filesize=5.5 K 2024-12-03T13:07:57,388 DEBUG [M:0;35946657df09:38233 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/301190ca029b42308403606821474126 as hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/301190ca029b42308403606821474126 2024-12-03T13:07:57,409 INFO [M:0;35946657df09:38233 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 301190ca029b42308403606821474126 2024-12-03T13:07:57,415 INFO [M:0;35946657df09:38233 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/301190ca029b42308403606821474126, entries=11, sequenceid=104, filesize=6.3 K 2024-12-03T13:07:57,417 DEBUG [M:0;35946657df09:38233 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/69f828834dbb4b2093043ffad3198e17 as hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/69f828834dbb4b2093043ffad3198e17 2024-12-03T13:07:57,428 INFO [M:0;35946657df09:38233 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/69f828834dbb4b2093043ffad3198e17, entries=1, sequenceid=104, filesize=5.0 K 2024-12-03T13:07:57,431 DEBUG [M:0;35946657df09:38233 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a556fca1d7564777a5d132c2963e221e as hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a556fca1d7564777a5d132c2963e221e 2024-12-03T13:07:57,448 INFO [M:0;35946657df09:38233 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a556fca1d7564777a5d132c2963e221e, entries=1, sequenceid=104, filesize=4.9 K 2024-12-03T13:07:57,451 INFO [M:0;35946657df09:38233 {}] regionserver.HRegion(3040): Finished flush of dataSize ~40.21 KB/41173, heapSize ~50.08 KB/51280, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 256ms, sequenceid=104, compaction requested=false
2024-12-03T13:07:57,472 INFO [M:0;35946657df09:38233 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T13:07:57,472 DEBUG [M:0;35946657df09:38233 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-03T13:07:57,473 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/MasterData/WALs/35946657df09,38233,1733231162231 2024-12-03T13:07:57,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741830_1006 (size=48474) 2024-12-03T13:07:57,478 INFO [M:0;35946657df09:38233 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-03T13:07:57,478 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-03T13:07:57,478 INFO [M:0;35946657df09:38233 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:38233 2024-12-03T13:07:57,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45207 is added to blk_1073741830_1006 (size=48474) 2024-12-03T13:07:57,490 DEBUG [M:0;35946657df09:38233 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/35946657df09,38233,1733231162231 already deleted, retry=false 2024-12-03T13:07:57,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T13:07:57,598 INFO [M:0;35946657df09:38233 {}] regionserver.HRegionServer(1307): Exiting; stopping=35946657df09,38233,1733231162231; zookeeper connection closed.
2024-12-03T13:07:57,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38233-0x1019c9652780000, quorum=127.0.0.1:58576, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T13:07:57,604 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3e210958{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T13:07:57,608 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c5298b9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T13:07:57,608 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T13:07:57,609 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5303a55e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T13:07:57,609 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@27e71ed3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/hadoop.log.dir/,STOPPED} 2024-12-03T13:07:57,616 WARN [BP-1197494549-172.17.0.2-1733231157423 heartbeating to localhost/127.0.0.1:39447 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T13:07:57,616 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T13:07:57,616 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T13:07:57,616 WARN [BP-1197494549-172.17.0.2-1733231157423 heartbeating to localhost/127.0.0.1:39447 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1197494549-172.17.0.2-1733231157423 (Datanode Uuid d3e3b013-4b2c-4727-8381-1b9b0b18c190) service to localhost/127.0.0.1:39447 2024-12-03T13:07:57,618 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/cluster_e02847aa-a639-dbe2-f596-1121a5b8dbf4/dfs/data/data3/current/BP-1197494549-172.17.0.2-1733231157423 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T13:07:57,619 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/cluster_e02847aa-a639-dbe2-f596-1121a5b8dbf4/dfs/data/data4/current/BP-1197494549-172.17.0.2-1733231157423 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T13:07:57,619 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T13:07:57,631 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5bf4a5dd{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T13:07:57,632 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@38b7daa8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T13:07:57,632 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T13:07:57,632 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@77047f54{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T13:07:57,632 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6659e51{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/hadoop.log.dir/,STOPPED} 2024-12-03T13:07:57,636 WARN [BP-1197494549-172.17.0.2-1733231157423 heartbeating to localhost/127.0.0.1:39447 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T13:07:57,636 WARN [BP-1197494549-172.17.0.2-1733231157423 heartbeating to localhost/127.0.0.1:39447 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1197494549-172.17.0.2-1733231157423 (Datanode Uuid a4f432e0-94c2-4c91-abbb-cdae87e4ad4c) service to localhost/127.0.0.1:39447 2024-12-03T13:07:57,637 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/cluster_e02847aa-a639-dbe2-f596-1121a5b8dbf4/dfs/data/data2/current/BP-1197494549-172.17.0.2-1733231157423 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 
2024-12-03T13:07:57,637 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-03T13:07:57,637 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T13:07:57,637 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T13:07:57,638 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/cluster_e02847aa-a639-dbe2-f596-1121a5b8dbf4/dfs/data/data1/current/BP-1197494549-172.17.0.2-1733231157423 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T13:07:57,650 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c97b431{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T13:07:57,651 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5718b892{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T13:07:57,651 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T13:07:57,652 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75cb83b2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T13:07:57,652 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24b482a7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/hadoop.log.dir/,STOPPED} 2024-12-03T13:07:57,662 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-03T13:07:57,711 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-03T13:07:57,724 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=63 (was 12)

Potentially hanging thread: region-location-1
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
  java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
  java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
  java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
  java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
  java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
  java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-3-2
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: ForkJoinPool-2-worker-1
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
  java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
  java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
  java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)

Potentially hanging thread: HBase-Metrics2-1
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
  java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
  java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
  java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-5-2
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: SessionTracker
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163)

Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@56a03b7f
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253)
  app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46)
  app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: Timer for 'HBase' metrics system
  java.base@17.0.11/java.lang.Object.wait(Native Method)
  java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
  java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)

Potentially hanging thread: LeaseRenewer:jenkins@localhost:39447
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
  app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
  app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RS-EventLoopGroup-1-3
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
  app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39447
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
  java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
  java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
  app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RS-EventLoopGroup-3-2
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
  app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RpcClient-timer-pool-0
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598)
  app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RS-EventLoopGroup-3-1
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
  app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: ForkJoinPool-2-worker-4
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
  java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
  java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
  java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)

Potentially hanging thread: Time-limited test.named-queue-events-pool-0
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
  java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
  java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
  java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
  java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
  app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47)
  app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56)
  app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159)
  app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: master/35946657df09:0:becomeActiveMaster-MemStoreChunkPool Statistics
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
  java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
  java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
  java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RS-EventLoopGroup-1-2
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
  app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1
  java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
  java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
  java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
  java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
  app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
  app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
  app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
  app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-4-2
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (709188109) connection to localhost/127.0.0.1:39447 from jenkins.hfs.0
  java.base@17.0.11/java.lang.Object.wait(Native Method)
  app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
  app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: IPC Client (709188109) connection to localhost/127.0.0.1:39447 from jenkins
  java.base@17.0.11/java.lang.Object.wait(Native Method)
  app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
  app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: SSL Certificates Store Monitor
  java.base@17.0.11/java.lang.Object.wait(Native Method)
  java.base@17.0.11/java.lang.Object.wait(Object.java:338)
  java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537)
  java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)

Potentially hanging thread: region-location-0
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
  java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
  java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
  java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
  java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
  java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
  java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (709188109) connection to localhost/127.0.0.1:39447 from jenkins
  java.base@17.0.11/java.lang.Object.wait(Native Method)
  app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
  app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: nioEventLoopGroup-3-3
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: SnapshotHandlerChoreCleaner
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
  java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
  java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
  java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-5-1
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
  java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
  java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
  java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner
  java.base@17.0.11/java.lang.Object.wait(Native Method)
  java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
  java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
  app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39447
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
  java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
  java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
  app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:39447
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
  app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
  app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-4-1
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: ForkJoinPool-2-worker-2
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410)
  java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726)
  java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
  java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)

Potentially hanging thread: nioEventLoopGroup-5-3
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-3-1
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: regionserver/35946657df09:0.leaseChecker
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82)

Potentially hanging thread: nioEventLoopGroup-2-2
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39447
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
  java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
  java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
  app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: master/35946657df09:0:becomeActiveMaster-MemStoreChunkPool Statistics
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
  java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
  java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
  java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2
  java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
  java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
  java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
  java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
  app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
  app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
  app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
  app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-2-1
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161)

Potentially hanging thread: nioEventLoopGroup-2-3
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: ForkJoinPool-2-worker-3
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
  java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
  java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
  java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)

Potentially hanging thread: regionserver/35946657df09:0.procedureResultReporter
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
  java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
  java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
  java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: RS-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=403 (was 286) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=873 (was 1461), ProcessCount=11 (was 11), AvailableMemoryMB=1648 (was 2576) 2024-12-03T13:07:57,733 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=64, OpenFileDescriptor=403, MaxFileDescriptor=1048576, SystemLoadAverage=873, ProcessCount=11, AvailableMemoryMB=1647 2024-12-03T13:07:57,733 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-03T13:07:57,733 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/hadoop.log.dir so I do NOT create it in target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2 2024-12-03T13:07:57,734 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/8e28db5e-4a37-11f2-aa6e-ca8cf2dd9adc/hadoop.tmp.dir so I do NOT create it in target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2 2024-12-03T13:07:57,734 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d, deleteOnExit=true 2024-12-03T13:07:57,734 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-03T13:07:57,734 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/test.cache.data in system properties and HBase conf 2024-12-03T13:07:57,734 INFO 
[Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/hadoop.tmp.dir in system properties and HBase conf 2024-12-03T13:07:57,734 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/hadoop.log.dir in system properties and HBase conf 2024-12-03T13:07:57,734 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-03T13:07:57,734 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-03T13:07:57,735 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-03T13:07:57,735 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-03T13:07:57,735 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-03T13:07:57,735 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-03T13:07:57,735 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-03T13:07:57,735 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T13:07:57,735 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-03T13:07:57,736 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-03T13:07:57,736 INFO 
[Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T13:07:57,736 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T13:07:57,736 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-03T13:07:57,736 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/nfs.dump.dir in system properties and HBase conf 2024-12-03T13:07:57,736 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/java.io.tmpdir in system properties and HBase conf 2024-12-03T13:07:57,736 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T13:07:57,736 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-03T13:07:57,736 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-03T13:07:57,755 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T13:07:58,028 INFO [regionserver/35946657df09:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T13:07:58,069 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T13:07:58,077 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T13:07:58,085 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T13:07:58,085 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T13:07:58,085 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T13:07:58,088 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T13:07:58,091 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59c96799{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/hadoop.log.dir/,AVAILABLE} 2024-12-03T13:07:58,092 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@23d7b823{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T13:07:58,226 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@38c1fd10{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/java.io.tmpdir/jetty-localhost-38097-hadoop-hdfs-3_4_1-tests_jar-_-any-6396717599772392684/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T13:07:58,226 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@613da372{HTTP/1.1, (http/1.1)}{localhost:38097} 2024-12-03T13:07:58,227 INFO [Time-limited test {}] server.Server(415): Started @123991ms 2024-12-03T13:07:58,245 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T13:07:58,446 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T13:07:58,450 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T13:07:58,451 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T13:07:58,451 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T13:07:58,451 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T13:07:58,452 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@439eb1c5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/hadoop.log.dir/,AVAILABLE} 2024-12-03T13:07:58,452 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c443c4f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T13:07:58,555 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@469f194a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/java.io.tmpdir/jetty-localhost-40479-hadoop-hdfs-3_4_1-tests_jar-_-any-17083061682121698499/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T13:07:58,556 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@34592b2d{HTTP/1.1, (http/1.1)}{localhost:40479} 2024-12-03T13:07:58,556 INFO [Time-limited test {}] server.Server(415): Started @124321ms 2024-12-03T13:07:58,557 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T13:07:58,613 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T13:07:58,618 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T13:07:58,623 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T13:07:58,623 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T13:07:58,623 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T13:07:58,623 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@623245d7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/hadoop.log.dir/,AVAILABLE} 2024-12-03T13:07:58,624 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@19d7702a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T13:07:58,725 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4b765977{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/java.io.tmpdir/jetty-localhost-36063-hadoop-hdfs-3_4_1-tests_jar-_-any-10242215881879910526/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T13:07:58,726 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@50ff44c9{HTTP/1.1, (http/1.1)}{localhost:36063} 2024-12-03T13:07:58,726 INFO [Time-limited test {}] server.Server(415): Started @124491ms 2024-12-03T13:07:58,728 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T13:07:59,198 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data1/current/BP-1901943026-172.17.0.2-1733231277769/current, will proceed with Du for space computation calculation, 2024-12-03T13:07:59,198 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data2/current/BP-1901943026-172.17.0.2-1733231277769/current, will proceed with Du for space computation calculation, 2024-12-03T13:07:59,234 WARN [Thread-429 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T13:07:59,237 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x67979dfd1ffaec97 with lease ID 0x147fe8b21f88577a: Processing first storage report for DS-331304b8-ae9c-4bbd-903a-0b69df524b18 from datanode DatanodeRegistration(127.0.0.1:41871, datanodeUuid=6714c1e8-3a43-4bf4-9fc7-149c84e2fc1d, infoPort=41005, infoSecurePort=0, ipcPort=42683, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769) 2024-12-03T13:07:59,238 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x67979dfd1ffaec97 with lease ID 0x147fe8b21f88577a: from storage DS-331304b8-ae9c-4bbd-903a-0b69df524b18 node DatanodeRegistration(127.0.0.1:41871, datanodeUuid=6714c1e8-3a43-4bf4-9fc7-149c84e2fc1d, infoPort=41005, infoSecurePort=0, ipcPort=42683, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T13:07:59,238 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x67979dfd1ffaec97 with lease ID 0x147fe8b21f88577a: Processing first storage report for DS-4f28039e-cadb-485f-9091-b6f6c4c13e09 from datanode DatanodeRegistration(127.0.0.1:41871, datanodeUuid=6714c1e8-3a43-4bf4-9fc7-149c84e2fc1d, infoPort=41005, infoSecurePort=0, ipcPort=42683, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769) 2024-12-03T13:07:59,238 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x67979dfd1ffaec97 with lease ID 0x147fe8b21f88577a: from storage DS-4f28039e-cadb-485f-9091-b6f6c4c13e09 node DatanodeRegistration(127.0.0.1:41871, datanodeUuid=6714c1e8-3a43-4bf4-9fc7-149c84e2fc1d, infoPort=41005, infoSecurePort=0, ipcPort=42683, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T13:07:59,407 WARN [Thread-476 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data3/current/BP-1901943026-172.17.0.2-1733231277769/current, will proceed with Du for space computation calculation, 2024-12-03T13:07:59,407 WARN [Thread-477 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data4/current/BP-1901943026-172.17.0.2-1733231277769/current, will proceed with Du for space computation calculation, 2024-12-03T13:07:59,435 WARN [Thread-452 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T13:07:59,441 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xde2a37d0e6e537e with lease ID 0x147fe8b21f88577b: Processing first storage report for DS-2cd4f562-9f9a-4adf-a1fa-e65d21cedc2f from datanode DatanodeRegistration(127.0.0.1:38443, datanodeUuid=bb5ffb4e-0b04-4306-9aa6-d2b870e4faf7, infoPort=43607, infoSecurePort=0, ipcPort=45845, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769) 2024-12-03T13:07:59,442 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xde2a37d0e6e537e with lease ID 0x147fe8b21f88577b: from storage DS-2cd4f562-9f9a-4adf-a1fa-e65d21cedc2f node DatanodeRegistration(127.0.0.1:38443, datanodeUuid=bb5ffb4e-0b04-4306-9aa6-d2b870e4faf7, infoPort=43607, infoSecurePort=0, ipcPort=45845, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T13:07:59,442 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xde2a37d0e6e537e with lease ID 0x147fe8b21f88577b: Processing first storage report for DS-13f8643f-ff8f-4072-94dd-a3c1e350d374 from datanode DatanodeRegistration(127.0.0.1:38443, datanodeUuid=bb5ffb4e-0b04-4306-9aa6-d2b870e4faf7, infoPort=43607, infoSecurePort=0, ipcPort=45845, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769) 2024-12-03T13:07:59,442 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xde2a37d0e6e537e with lease ID 0x147fe8b21f88577b: from storage DS-13f8643f-ff8f-4072-94dd-a3c1e350d374 node DatanodeRegistration(127.0.0.1:38443, datanodeUuid=bb5ffb4e-0b04-4306-9aa6-d2b870e4faf7, infoPort=43607, infoSecurePort=0, ipcPort=45845, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T13:07:59,484 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2 2024-12-03T13:07:59,510 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/zookeeper_0, clientPort=53253, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-03T13:07:59,516 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=53253 2024-12-03T13:07:59,518 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T13:07:59,520 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T13:07:59,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38443 is added to blk_1073741825_1001 (size=7) 2024-12-03T13:07:59,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41871 is added to blk_1073741825_1001 (size=7) 2024-12-03T13:07:59,552 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f with version=8 2024-12-03T13:07:59,552 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/hbase-staging 2024-12-03T13:07:59,554 INFO [Time-limited test {}] client.ConnectionUtils(129): master/35946657df09:0 server-side Connection retries=45 2024-12-03T13:07:59,554 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T13:07:59,554 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T13:07:59,555 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T13:07:59,555 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T13:07:59,555 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T13:07:59,555 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T13:07:59,555 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T13:07:59,556 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:40851 2024-12-03T13:07:59,556 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T13:07:59,559 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T13:07:59,563 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:40851 connecting to ZooKeeper ensemble=127.0.0.1:53253 2024-12-03T13:07:59,641 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:408510x0, 
quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T13:07:59,642 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40851-0x1019c9820000000 connected 2024-12-03T13:07:59,702 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T13:07:59,703 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T13:07:59,703 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T13:07:59,704 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40851 2024-12-03T13:07:59,704 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40851 2024-12-03T13:07:59,704 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40851 2024-12-03T13:07:59,705 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40851 2024-12-03T13:07:59,705 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40851 2024-12-03T13:07:59,705 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f, hbase.cluster.distributed=false 2024-12-03T13:07:59,725 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/35946657df09:0 server-side Connection retries=45 2024-12-03T13:07:59,725 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T13:07:59,725 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T13:07:59,725 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T13:07:59,725 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T13:07:59,725 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T13:07:59,725 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T13:07:59,725 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 
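The records above, from "Starting up minicluster" through the NettyRpcServer binds and ZooKeeper handshakes, are what HBaseTestingUtility emits while bootstrapping a single-process test cluster. A minimal sketch of the call that produces this sequence, assuming the HBase 2.x test API; the option values are copied from the StartMiniClusterOption line logged earlier, while the class and variable names here are illustrative:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterStartupSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    // Mirror the logged option: 1 master, 1 region server, 2 HDFS data
    // nodes, 1 ZooKeeper server (createRootDir/createWALDir left false).
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);  // drives the DFS, ZK, master and RS startup seen here
    try {
      // a test such as TestLogRolling would exercise the cluster here
    } finally {
      util.shutdownMiniCluster();   // produces the teardown/ResourceChecker records
    }
  }
}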
2024-12-03T13:07:59,726 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:42823 2024-12-03T13:07:59,726 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T13:07:59,727 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T13:07:59,728 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T13:07:59,730 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T13:07:59,733 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:42823 connecting to ZooKeeper ensemble=127.0.0.1:53253 2024-12-03T13:07:59,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:428230x0, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T13:07:59,744 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:428230x0, quorum=127.0.0.1:53253, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T13:07:59,744 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42823-0x1019c9820000001 connected 2024-12-03T13:07:59,745 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42823-0x1019c9820000001, quorum=127.0.0.1:53253, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T13:07:59,745 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42823-0x1019c9820000001, quorum=127.0.0.1:53253, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T13:07:59,747 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42823 2024-12-03T13:07:59,747 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42823 2024-12-03T13:07:59,748 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42823 2024-12-03T13:07:59,749 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42823 2024-12-03T13:07:59,749 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42823 2024-12-03T13:07:59,751 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/35946657df09,40851,1733231279554 2024-12-03T13:07:59,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T13:07:59,761 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/35946657df09,40851,1733231279554 
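The RpcExecutor lines above pair handlerCount=3 with maxQueueLength=30 on both the master (port 40851) and the region server (port 42823). That is consistent with a sizing rule of 10 call-queue slots per handler; treat the per-handler default (the hbase.ipc.server.max.callqueue.length setting) as an assumption here, since the log only prints the resulting numbers:

public class CallQueueSizingSketch {
  public static void main(String[] args) {
    int handlerCount = 3;       // handlerCount=3, as logged for default.FPBQ.Fifo
    int slotsPerHandler = 10;   // assumed default for hbase.ipc.server.max.callqueue.length
    int maxQueueLength = handlerCount * slotsPerHandler;
    System.out.println(maxQueueLength);  // 30, matching maxQueueLength=30 above
  }
}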
2024-12-03T13:07:59,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42823-0x1019c9820000001, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T13:07:59,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42823-0x1019c9820000001, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T13:07:59,769 DEBUG [M:0;35946657df09:40851 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;35946657df09:40851 2024-12-03T13:07:59,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T13:07:59,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42823-0x1019c9820000001, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T13:07:59,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T13:07:59,770 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-03T13:07:59,771 INFO [master/35946657df09:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/35946657df09,40851,1733231279554 from backup master directory 2024-12-03T13:07:59,771 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-03T13:07:59,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42823-0x1019c9820000001, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T13:07:59,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/35946657df09,40851,1733231279554 2024-12-03T13:07:59,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T13:07:59,777 WARN [master/35946657df09:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
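The znode churn above (a backup-masters entry created and deleted, watchers set on /hbase/master, NodeCreated and NodeDeleted events) is the usual ephemeral-znode leader election. A simplified sketch of that pattern against the logged ensemble, written with the plain ZooKeeper client rather than HBase's actual ActiveMasterManager code, all names illustrative:

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class MasterElectionSketch {
  // Try to become active by creating the ephemeral master znode; whoever
  // succeeds is the active master, everyone else watches for its deletion.
  static boolean tryBecomeActive(ZooKeeper zk, byte[] serverName) throws Exception {
    try {
      zk.create("/hbase/master", serverName,
          ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
      return true;                       // registered as active master
    } catch (KeeperException.NodeExistsException e) {
      zk.exists("/hbase/master", true);  // a NodeDeleted event signals failover
      return false;                      // remain a backup master
    }
  }

  public static void main(String[] args) throws Exception {
    // Ensemble address as logged; the watcher just prints each event.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:53253", 30000,
        event -> System.out.println("event: " + event.getType() + " " + event.getPath()));
    System.out.println("active=" + tryBecomeActive(zk, "host,port,startcode".getBytes()));
    Thread.sleep(5000);                  // leave time for watch events to arrive
    zk.close();
  }
}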
2024-12-03T13:07:59,777 INFO [master/35946657df09:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=35946657df09,40851,1733231279554 2024-12-03T13:07:59,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41871 is added to blk_1073741826_1002 (size=42) 2024-12-03T13:07:59,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38443 is added to blk_1073741826_1002 (size=42) 2024-12-03T13:07:59,800 DEBUG [master/35946657df09:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/hbase.id with ID: 229c3d9f-2215-4539-8004-5d0b427619ac 2024-12-03T13:07:59,820 INFO [master/35946657df09:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T13:07:59,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T13:07:59,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42823-0x1019c9820000001, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T13:07:59,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38443 is added to blk_1073741827_1003 (size=196) 2024-12-03T13:07:59,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41871 is added to blk_1073741827_1003 (size=196) 2024-12-03T13:07:59,853 INFO [master/35946657df09:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T13:07:59,854 INFO [master/35946657df09:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, 
flushIntervalMs=900000 2024-12-03T13:07:59,854 INFO [master/35946657df09:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T13:07:59,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38443 is added to blk_1073741828_1004 (size=1189) 2024-12-03T13:07:59,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41871 is added to blk_1073741828_1004 (size=1189) 2024-12-03T13:07:59,866 INFO [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/data/master/store 2024-12-03T13:07:59,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41871 is added to blk_1073741829_1005 (size=34) 2024-12-03T13:07:59,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38443 is added to blk_1073741829_1005 (size=34) 2024-12-03T13:07:59,881 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T13:07:59,881 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T13:07:59,881 INFO [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T13:07:59,881 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
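The 'master:store' descriptor printed above packs every column family attribute into a single line. The same shape can be rebuilt with the 2.x client API, which makes those attributes easier to read; the real master:store region is created internally by MasterRegion, so this block is purely a decoding aid, not how that region comes to exist:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static void main(String[] args) {
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master:store"))
        // 'info' as logged: ROW_INDEX_V1 encoding, ROWCOL bloom filter,
        // in-memory, 3 versions, 8 KB blocks.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setMaxVersions(3)
            .setBlocksize(8 * 1024)
            .build())
        // 'proc', 'rs' and 'state' as logged use the defaults: 1 version,
        // ROW bloom filter, 64 KB blocks, no data block encoding.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
        .build();
    System.out.println(td);
  }
}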
2024-12-03T13:07:59,881 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-03T13:07:59,881 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T13:07:59,881 INFO [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T13:07:59,881 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-03T13:07:59,882 WARN [master/35946657df09:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/data/master/store/.initializing
2024-12-03T13:07:59,882 DEBUG [master/35946657df09:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554
2024-12-03T13:07:59,886 INFO [master/35946657df09:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=35946657df09%2C40851%2C1733231279554, suffix=, logDir=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554, archiveDir=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/oldWALs, maxLogs=10
2024-12-03T13:07:59,887 INFO [master/35946657df09:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C40851%2C1733231279554.1733231279886
2024-12-03T13:07:59,901 INFO [master/35946657df09:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
2024-12-03T13:07:59,901 DEBUG [master/35946657df09:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41005:41005),(127.0.0.1/127.0.0.1:43607:43607)]
2024-12-03T13:07:59,902 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-12-03T13:07:59,902 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T13:07:59,902 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:07:59,902 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:07:59,904 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:07:59,907 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-12-03T13:07:59,907 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:07:59,910 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-03T13:07:59,910 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:07:59,913 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-12-03T13:07:59,913 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:07:59,914 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-03T13:07:59,914 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:07:59,924 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-12-03T13:07:59,924 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:07:59,925 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-03T13:07:59,925 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:07:59,929 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-12-03T13:07:59,930 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:07:59,930 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-03T13:07:59,932 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:07:59,933 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:07:59,936 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-12-03T13:07:59,937 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:07:59,944 DEBUG [master/35946657df09:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-03T13:07:59,945 INFO [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=753893, jitterRate=-0.041375890374183655}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-12-03T13:07:59,946 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-03T13:07:59,947 INFO [master/35946657df09:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-12-03T13:07:59,965 DEBUG [master/35946657df09:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@621c7fe3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-03T13:07:59,968 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating...
2024-12-03T13:07:59,968 INFO [master/35946657df09:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-12-03T13:07:59,968 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-12-03T13:07:59,968 INFO [master/35946657df09:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-12-03T13:07:59,969 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec
2024-12-03T13:07:59,970 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec
2024-12-03T13:07:59,970 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-12-03T13:07:59,979 INFO [master/35946657df09:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
2024-12-03T13:07:59,982 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-12-03T13:07:59,985 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false
2024-12-03T13:07:59,986 INFO [master/35946657df09:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-12-03T13:07:59,992 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-12-03T13:08:00,002 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false
2024-12-03T13:08:00,006 INFO [master/35946657df09:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-12-03T13:08:00,008 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-12-03T13:08:00,019 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false
2024-12-03T13:08:00,020 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-12-03T13:08:00,027 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false
2024-12-03T13:08:00,028 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-12-03T13:08:00,035 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-12-03T13:08:00,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-03T13:08:00,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42823-0x1019c9820000001, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-03T13:08:00,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42823-0x1019c9820000001, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:08:00,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:08:00,047 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=35946657df09,40851,1733231279554, sessionid=0x1019c9820000000, setting cluster-up flag (Was=false)
2024-12-03T13:08:00,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:08:00,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42823-0x1019c9820000001, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:08:00,085 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-12-03T13:08:00,086 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=35946657df09,40851,1733231279554
2024-12-03T13:08:00,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:08:00,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42823-0x1019c9820000001, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:08:00,127 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-12-03T13:08:00,128 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=35946657df09,40851,1733231279554
2024-12-03T13:08:00,131 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta
2024-12-03T13:08:00,131 INFO [master/35946657df09:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2
2024-12-03T13:08:00,131 INFO [master/35946657df09:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-12-03T13:08:00,132 DEBUG [master/35946657df09:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 35946657df09,40851,1733231279554 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
2024-12-03T13:08:00,132 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/35946657df09:0, corePoolSize=5, maxPoolSize=5
2024-12-03T13:08:00,132 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/35946657df09:0, corePoolSize=5, maxPoolSize=5
2024-12-03T13:08:00,132 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/35946657df09:0, corePoolSize=5, maxPoolSize=5
2024-12-03T13:08:00,133 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/35946657df09:0, corePoolSize=5, maxPoolSize=5
2024-12-03T13:08:00,133 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/35946657df09:0, corePoolSize=10, maxPoolSize=10
2024-12-03T13:08:00,133 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:08:00,133 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/35946657df09:0, corePoolSize=2, maxPoolSize=2
2024-12-03T13:08:00,133 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:08:00,134 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733231310134
2024-12-03T13:08:00,134 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-12-03T13:08:00,135 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-12-03T13:08:00,135 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-12-03T13:08:00,135 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-12-03T13:08:00,135 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-12-03T13:08:00,135 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta
2024-12-03T13:08:00,135 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-12-03T13:08:00,135 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region
2024-12-03T13:08:00,135 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-03T13:08:00,135 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-12-03T13:08:00,135 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-12-03T13:08:00,136 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-12-03T13:08:00,136 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-12-03T13:08:00,136 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-12-03T13:08:00,136 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:08:00,136 DEBUG [master/35946657df09:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/35946657df09:0:becomeActiveMaster-HFileCleaner.large.0-1733231280136,5,FailOnTimeoutGroup]
2024-12-03T13:08:00,137 DEBUG [master/35946657df09:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/35946657df09:0:becomeActiveMaster-HFileCleaner.small.0-1733231280136,5,FailOnTimeoutGroup]
2024-12-03T13:08:00,137 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-03T13:08:00,137 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-03T13:08:00,137 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-12-03T13:08:00,137 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-12-03T13:08:00,137 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
2024-12-03T13:08:00,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38443 is added to blk_1073741831_1007 (size=1039)
2024-12-03T13:08:00,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41871 is added to blk_1073741831_1007 (size=1039)
2024-12-03T13:08:00,162 DEBUG [RS:0;35946657df09:42823 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;35946657df09:42823
2024-12-03T13:08:00,163 INFO [RS:0;35946657df09:42823 {}] regionserver.HRegionServer(1008): ClusterId : 229c3d9f-2215-4539-8004-5d0b427619ac
2024-12-03T13:08:00,163 DEBUG [RS:0;35946657df09:42823 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-12-03T13:08:00,188 DEBUG [RS:0;35946657df09:42823 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-12-03T13:08:00,188 DEBUG [RS:0;35946657df09:42823 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-12-03T13:08:00,205 DEBUG [RS:0;35946657df09:42823 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-12-03T13:08:00,205 DEBUG [RS:0;35946657df09:42823 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20848241, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-03T13:08:00,206 DEBUG [RS:0;35946657df09:42823 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d1013be, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=35946657df09/172.17.0.2:0
2024-12-03T13:08:00,206 INFO [RS:0;35946657df09:42823 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled
2024-12-03T13:08:00,206 INFO [RS:0;35946657df09:42823 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled
2024-12-03T13:08:00,206 DEBUG [RS:0;35946657df09:42823 {}] regionserver.HRegionServer(1090): About to register with Master.
2024-12-03T13:08:00,207 INFO [RS:0;35946657df09:42823 {}] regionserver.HRegionServer(3073): reportForDuty to master=35946657df09,40851,1733231279554 with isa=35946657df09/172.17.0.2:42823, startcode=1733231279724
2024-12-03T13:08:00,207 DEBUG [RS:0;35946657df09:42823 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-03T13:08:00,219 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40739, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService
2024-12-03T13:08:00,220 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40851 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 35946657df09,42823,1733231279724
2024-12-03T13:08:00,220 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40851 {}] master.ServerManager(486): Registering regionserver=35946657df09,42823,1733231279724
2024-12-03T13:08:00,222 DEBUG [RS:0;35946657df09:42823 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f
2024-12-03T13:08:00,222 DEBUG [RS:0;35946657df09:42823 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:42201
2024-12-03T13:08:00,222 DEBUG [RS:0;35946657df09:42823 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1
2024-12-03T13:08:00,227 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-03T13:08:00,229 DEBUG [RS:0;35946657df09:42823 {}] zookeeper.ZKUtil(111): regionserver:42823-0x1019c9820000001, quorum=127.0.0.1:53253, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/35946657df09,42823,1733231279724
2024-12-03T13:08:00,229 WARN [RS:0;35946657df09:42823 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-03T13:08:00,229 INFO [RS:0;35946657df09:42823 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-03T13:08:00,229 DEBUG [RS:0;35946657df09:42823 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724
2024-12-03T13:08:00,240 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [35946657df09,42823,1733231279724]
2024-12-03T13:08:00,243 DEBUG [RS:0;35946657df09:42823 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds
2024-12-03T13:08:00,244 INFO [RS:0;35946657df09:42823 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-12-03T13:08:00,246 INFO [RS:0;35946657df09:42823 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-12-03T13:08:00,248 INFO [RS:0;35946657df09:42823 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-03T13:08:00,248 INFO [RS:0;35946657df09:42823 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-03T13:08:00,249 INFO [RS:0;35946657df09:42823 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S
2024-12-03T13:08:00,256 INFO [RS:0;35946657df09:42823 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-12-03T13:08:00,256 DEBUG [RS:0;35946657df09:42823 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:08:00,256 DEBUG [RS:0;35946657df09:42823 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:08:00,256 DEBUG [RS:0;35946657df09:42823 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:08:00,256 DEBUG [RS:0;35946657df09:42823 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:08:00,256 DEBUG [RS:0;35946657df09:42823 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:08:00,256 DEBUG [RS:0;35946657df09:42823 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/35946657df09:0, corePoolSize=2, maxPoolSize=2
2024-12-03T13:08:00,256 DEBUG [RS:0;35946657df09:42823 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:08:00,256 DEBUG [RS:0;35946657df09:42823 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:08:00,257 DEBUG [RS:0;35946657df09:42823 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:08:00,257 DEBUG [RS:0;35946657df09:42823 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:08:00,257 DEBUG [RS:0;35946657df09:42823 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:08:00,257 DEBUG [RS:0;35946657df09:42823 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/35946657df09:0, corePoolSize=3, maxPoolSize=3
2024-12-03T13:08:00,257 DEBUG [RS:0;35946657df09:42823 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/35946657df09:0, corePoolSize=3, maxPoolSize=3
2024-12-03T13:08:00,257 INFO [RS:0;35946657df09:42823 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-12-03T13:08:00,258 INFO [RS:0;35946657df09:42823 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-12-03T13:08:00,258 INFO [RS:0;35946657df09:42823 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-12-03T13:08:00,258 INFO [RS:0;35946657df09:42823 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-12-03T13:08:00,258 INFO [RS:0;35946657df09:42823 {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,42823,1733231279724-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-03T13:08:00,279 INFO [RS:0;35946657df09:42823 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-12-03T13:08:00,280 INFO [RS:0;35946657df09:42823 {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,42823,1733231279724-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-03T13:08:00,302 INFO [RS:0;35946657df09:42823 {}] regionserver.Replication(204): 35946657df09,42823,1733231279724 started
2024-12-03T13:08:00,302 INFO [RS:0;35946657df09:42823 {}] regionserver.HRegionServer(1767): Serving as 35946657df09,42823,1733231279724, RpcServer on 35946657df09/172.17.0.2:42823, sessionid=0x1019c9820000001
2024-12-03T13:08:00,302 DEBUG [RS:0;35946657df09:42823 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-12-03T13:08:00,302 DEBUG [RS:0;35946657df09:42823 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 35946657df09,42823,1733231279724
2024-12-03T13:08:00,302 DEBUG [RS:0;35946657df09:42823 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '35946657df09,42823,1733231279724'
2024-12-03T13:08:00,302 DEBUG [RS:0;35946657df09:42823 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-12-03T13:08:00,303 DEBUG [RS:0;35946657df09:42823 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-12-03T13:08:00,304 DEBUG [RS:0;35946657df09:42823 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-12-03T13:08:00,304 DEBUG [RS:0;35946657df09:42823 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-12-03T13:08:00,304 DEBUG [RS:0;35946657df09:42823 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 35946657df09,42823,1733231279724
2024-12-03T13:08:00,304 DEBUG [RS:0;35946657df09:42823 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '35946657df09,42823,1733231279724'
2024-12-03T13:08:00,304 DEBUG [RS:0;35946657df09:42823 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-12-03T13:08:00,304 DEBUG [RS:0;35946657df09:42823 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-03T13:08:00,305 DEBUG [RS:0;35946657df09:42823 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-03T13:08:00,305 INFO [RS:0;35946657df09:42823 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-03T13:08:00,305 INFO [RS:0;35946657df09:42823 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-12-03T13:08:00,408 INFO [RS:0;35946657df09:42823 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=35946657df09%2C42823%2C1733231279724, suffix=, logDir=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724, archiveDir=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/oldWALs, maxLogs=32
2024-12-03T13:08:00,410 INFO [RS:0;35946657df09:42823 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C42823%2C1733231279724.1733231280409
2024-12-03T13:08:00,417 INFO [RS:0;35946657df09:42823 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
2024-12-03T13:08:00,417 DEBUG [RS:0;35946657df09:42823 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41005:41005),(127.0.0.1/127.0.0.1:43607:43607)]
2024-12-03T13:08:00,549 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039
2024-12-03T13:08:00,549 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f
2024-12-03T13:08:00,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41871 is added to blk_1073741833_1009 (size=32)
2024-12-03T13:08:00,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38443 is added to blk_1073741833_1009 (size=32)
2024-12-03T13:08:00,556 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T13:08:00,560 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-03T13:08:00,562 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-03T13:08:00,562 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:08:00,563 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-03T13:08:00,563 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-03T13:08:00,565 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-03T13:08:00,565 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:08:00,566 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-03T13:08:00,566 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-03T13:08:00,567 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-03T13:08:00,567 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:08:00,568 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-03T13:08:00,569 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/data/hbase/meta/1588230740
2024-12-03T13:08:00,569 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/data/hbase/meta/1588230740
2024-12-03T13:08:00,571 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-03T13:08:00,573 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-03T13:08:00,576 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-03T13:08:00,577 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=748944, jitterRate=-0.04766939580440521}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-03T13:08:00,578 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-12-03T13:08:00,578 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-03T13:08:00,578 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-03T13:08:00,578 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-03T13:08:00,578 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-03T13:08:00,578 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-03T13:08:00,579 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-03T13:08:00,579 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-03T13:08:00,580 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta
2024-12-03T13:08:00,581 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta
2024-12-03T13:08:00,581 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-12-03T13:08:00,583 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-03T13:08:00,585 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-12-03T13:08:00,735 DEBUG [35946657df09:40851 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-12-03T13:08:00,736 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=35946657df09,42823,1733231279724
2024-12-03T13:08:00,737 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 35946657df09,42823,1733231279724, state=OPENING
2024-12-03T13:08:00,751 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-12-03T13:08:00,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:08:00,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42823-0x1019c9820000001, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:08:00,761 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-03T13:08:00,761 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=35946657df09,42823,1733231279724}]
2024-12-03T13:08:00,761 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-03T13:08:00,913 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 35946657df09,42823,1733231279724
2024-12-03T13:08:00,914 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-03T13:08:00,921 INFO [RS-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60990, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-03T13:08:00,943 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740
2024-12-03T13:08:00,944 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-03T13:08:00,956 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=35946657df09%2C42823%2C1733231279724.meta, suffix=.meta, logDir=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724, archiveDir=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/oldWALs, maxLogs=32
2024-12-03T13:08:00,959 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
2024-12-03T13:08:01,003 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
2024-12-03T13:08:01,003 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43607:43607),(127.0.0.1/127.0.0.1:41005:41005)]
2024-12-03T13:08:01,003 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-12-03T13:08:01,003 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-12-03T13:08:01,004 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-12-03T13:08:01,004 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
2024-12-03T13:08:01,004 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-03T13:08:01,004 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T13:08:01,004 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-03T13:08:01,004 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-03T13:08:01,007 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T13:08:01,008 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T13:08:01,008 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T13:08:01,009 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T13:08:01,009 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T13:08:01,010 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T13:08:01,010 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T13:08:01,015 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T13:08:01,015 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T13:08:01,017 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T13:08:01,017 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T13:08:01,018 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T13:08:01,020 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/data/hbase/meta/1588230740 2024-12-03T13:08:01,023 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/data/hbase/meta/1588230740 2024-12-03T13:08:01,025 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
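
The CompactionConfiguration dump repeated above for each column family (minCompactSize:128 MB, minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000, off-peak ratio 5.000000) reflects a handful of tunables. A hedged sketch of the corresponding keys, assuming the usual hbase.hstore.compaction.* names; nothing here is specific to this test run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuning {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setInt("hbase.hstore.compaction.min", 3);        // minFilesToCompact
            conf.setInt("hbase.hstore.compaction.max", 10);       // maxFilesToCompact
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // selection ratio
            conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
            conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
        }
    }
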
2024-12-03T13:08:01,028 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-03T13:08:01,031 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=716040, jitterRate=-0.08950912952423096}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T13:08:01,032 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-03T13:08:01,039 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733231280913 2024-12-03T13:08:01,059 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-03T13:08:01,059 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-03T13:08:01,063 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=35946657df09,42823,1733231279724 2024-12-03T13:08:01,064 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 35946657df09,42823,1733231279724, state=OPEN 2024-12-03T13:08:01,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42823-0x1019c9820000001, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T13:08:01,153 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T13:08:01,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T13:08:01,155 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T13:08:01,160 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-03T13:08:01,160 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=35946657df09,42823,1733231279724 in 394 msec 2024-12-03T13:08:01,166 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-03T13:08:01,166 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 579 msec 2024-12-03T13:08:01,171 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.0380 sec 2024-12-03T13:08:01,171 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region 
servers to report in: status=status unset, state=RUNNING, startTime=1733231281171, completionTime=-1 2024-12-03T13:08:01,171 INFO [master/35946657df09:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-03T13:08:01,172 DEBUG [master/35946657df09:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-03T13:08:01,173 DEBUG [hconnection-0x53618785-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T13:08:01,179 INFO [RS-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60998, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T13:08:01,189 INFO [master/35946657df09:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-03T13:08:01,189 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733231341189 2024-12-03T13:08:01,189 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733231401189 2024-12-03T13:08:01,189 INFO [master/35946657df09:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 17 msec 2024-12-03T13:08:01,200 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:08:01,205 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:08:01,264 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,40851,1733231279554-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T13:08:01,264 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,40851,1733231279554-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T13:08:01,265 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,40851,1733231279554-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T13:08:01,265 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-35946657df09:40851, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T13:08:01,265 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-03T13:08:01,265 INFO [master/35946657df09:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
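
The NodeDataChanged events on /hbase/meta-region-server a few entries above are how clients learn where hbase:meta landed. A minimal standalone sketch using the plain ZooKeeper client against the quorum logged above; the payload is a serialized server name, so only its size is printed here, and the class name is invented:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class MetaZnodeWatch {
        public static void main(String[] args) throws Exception {
            ZooKeeper zk = new ZooKeeper("127.0.0.1:53253", 90000,
                (WatchedEvent e) -> System.out.println("zk event: " + e.getType() + " on " + e.getPath()));
            // Passing true re-registers the watch, mirroring MetaRegionLocationCache.
            byte[] data = zk.getData("/hbase/meta-region-server", true, new Stat());
            System.out.println("meta location payload: " + data.length + " bytes");
            zk.close();
        }
    }
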
2024-12-03T13:08:01,265 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-03T13:08:01,269 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-03T13:08:01,272 DEBUG [master/35946657df09:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-03T13:08:01,272 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T13:08:01,272 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T13:08:01,273 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T13:08:01,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41871 is added to blk_1073741835_1011 (size=358) 2024-12-03T13:08:01,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38443 is added to blk_1073741835_1011 (size=358) 2024-12-03T13:08:01,729 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 641f7de0aec6dadb323be03e384e3016, NAME => 'hbase:namespace,,1733231281265.641f7de0aec6dadb323be03e384e3016.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f 2024-12-03T13:08:01,734 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T13:08:01,740 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:08:01,780 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:08:01,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41871 is added to blk_1073741836_1012 (size=42) 2024-12-03T13:08:01,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38443 is added to blk_1073741836_1012 (size=42) 2024-12-03T13:08:01,795 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733231281265.641f7de0aec6dadb323be03e384e3016.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T13:08:01,795 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 641f7de0aec6dadb323be03e384e3016, disabling compactions & flushes 2024-12-03T13:08:01,795 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733231281265.641f7de0aec6dadb323be03e384e3016. 2024-12-03T13:08:01,795 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733231281265.641f7de0aec6dadb323be03e384e3016. 2024-12-03T13:08:01,795 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733231281265.641f7de0aec6dadb323be03e384e3016. after waiting 0 ms 2024-12-03T13:08:01,795 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733231281265.641f7de0aec6dadb323be03e384e3016. 2024-12-03T13:08:01,795 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733231281265.641f7de0aec6dadb323be03e384e3016. 2024-12-03T13:08:01,795 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 641f7de0aec6dadb323be03e384e3016: 2024-12-03T13:08:01,798 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T13:08:01,799 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733231281265.641f7de0aec6dadb323be03e384e3016.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733231281799"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733231281799"}]},"ts":"1733231281799"} 2024-12-03T13:08:01,805 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
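
The hbase:namespace table created above uses the schema printed in the HMaster create line (one info family, VERSIONS => '10', IN_MEMORY => 'true', BLOCKSIZE => '8192'). A sketch of building an equivalent descriptor through the public Admin API, assuming a reachable cluster on the classpath configuration; the table name is a placeholder, not part of this log:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class NamespaceLikeSchema {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection();
                 Admin admin = conn.getAdmin()) {
                ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(10)   // VERSIONS => '10'
                    .setInMemory(true)    // IN_MEMORY => 'true'
                    .setBlocksize(8192)   // BLOCKSIZE => '8192'
                    .build();
                admin.createTable(TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("example_table")) // placeholder name
                    .setColumnFamily(info)
                    .build());
            }
        }
    }
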
2024-12-03T13:08:01,806 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T13:08:01,807 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733231281806"}]},"ts":"1733231281806"} 2024-12-03T13:08:01,813 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-03T13:08:01,877 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=641f7de0aec6dadb323be03e384e3016, ASSIGN}] 2024-12-03T13:08:01,889 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=641f7de0aec6dadb323be03e384e3016, ASSIGN 2024-12-03T13:08:01,896 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=641f7de0aec6dadb323be03e384e3016, ASSIGN; state=OFFLINE, location=35946657df09,42823,1733231279724; forceNewPlan=false, retain=false 2024-12-03T13:08:02,048 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=641f7de0aec6dadb323be03e384e3016, regionState=OPENING, regionLocation=35946657df09,42823,1733231279724 2024-12-03T13:08:02,051 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 641f7de0aec6dadb323be03e384e3016, server=35946657df09,42823,1733231279724}] 2024-12-03T13:08:02,205 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 35946657df09,42823,1733231279724 2024-12-03T13:08:02,213 INFO [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733231281265.641f7de0aec6dadb323be03e384e3016. 
2024-12-03T13:08:02,213 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 641f7de0aec6dadb323be03e384e3016, NAME => 'hbase:namespace,,1733231281265.641f7de0aec6dadb323be03e384e3016.', STARTKEY => '', ENDKEY => ''} 2024-12-03T13:08:02,213 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 641f7de0aec6dadb323be03e384e3016 2024-12-03T13:08:02,214 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733231281265.641f7de0aec6dadb323be03e384e3016.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T13:08:02,214 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 641f7de0aec6dadb323be03e384e3016 2024-12-03T13:08:02,214 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 641f7de0aec6dadb323be03e384e3016 2024-12-03T13:08:02,223 INFO [StoreOpener-641f7de0aec6dadb323be03e384e3016-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 641f7de0aec6dadb323be03e384e3016 2024-12-03T13:08:02,225 INFO [StoreOpener-641f7de0aec6dadb323be03e384e3016-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 641f7de0aec6dadb323be03e384e3016 columnFamilyName info 2024-12-03T13:08:02,225 DEBUG [StoreOpener-641f7de0aec6dadb323be03e384e3016-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T13:08:02,226 INFO [StoreOpener-641f7de0aec6dadb323be03e384e3016-1 {}] regionserver.HStore(327): Store=641f7de0aec6dadb323be03e384e3016/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T13:08:02,231 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/data/hbase/namespace/641f7de0aec6dadb323be03e384e3016 2024-12-03T13:08:02,233 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] 
regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/data/hbase/namespace/641f7de0aec6dadb323be03e384e3016 2024-12-03T13:08:02,243 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 641f7de0aec6dadb323be03e384e3016 2024-12-03T13:08:02,247 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/data/hbase/namespace/641f7de0aec6dadb323be03e384e3016/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T13:08:02,249 INFO [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 641f7de0aec6dadb323be03e384e3016; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=747932, jitterRate=-0.04895530641078949}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T13:08:02,251 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 641f7de0aec6dadb323be03e384e3016: 2024-12-03T13:08:02,259 INFO [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733231281265.641f7de0aec6dadb323be03e384e3016., pid=6, masterSystemTime=1733231282205 2024-12-03T13:08:02,264 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733231281265.641f7de0aec6dadb323be03e384e3016. 2024-12-03T13:08:02,264 INFO [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733231281265.641f7de0aec6dadb323be03e384e3016. 2024-12-03T13:08:02,268 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=641f7de0aec6dadb323be03e384e3016, regionState=OPEN, openSeqNum=2, regionLocation=35946657df09,42823,1733231279724 2024-12-03T13:08:02,270 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40851 {}] assignment.AssignmentManager(1526): Unable to acquire lock for regionNode state=OPEN, location=35946657df09,42823,1733231279724, table=hbase:namespace, region=641f7de0aec6dadb323be03e384e3016. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-12-03T13:08:02,280 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-03T13:08:02,282 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 641f7de0aec6dadb323be03e384e3016, server=35946657df09,42823,1733231279724 in 224 msec 2024-12-03T13:08:02,285 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-03T13:08:02,285 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=641f7de0aec6dadb323be03e384e3016, ASSIGN in 403 msec 2024-12-03T13:08:02,287 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T13:08:02,291 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733231282287"}]},"ts":"1733231282287"} 2024-12-03T13:08:02,303 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-03T13:08:02,329 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T13:08:02,332 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.0640 sec 2024-12-03T13:08:02,372 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-03T13:08:02,410 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42823-0x1019c9820000001, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T13:08:02,410 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-03T13:08:02,410 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T13:08:02,424 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-03T13:08:02,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-03T13:08:02,473 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 48 msec 2024-12-03T13:08:02,481 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; 
CreateNamespaceProcedure, namespace=hbase 2024-12-03T13:08:02,518 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-03T13:08:02,533 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 51 msec 2024-12-03T13:08:02,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-03T13:08:02,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-03T13:08:02,585 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 2.808sec 2024-12-03T13:08:02,585 INFO [master/35946657df09:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-03T13:08:02,585 INFO [master/35946657df09:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-03T13:08:02,585 INFO [master/35946657df09:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-03T13:08:02,585 INFO [master/35946657df09:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-03T13:08:02,585 INFO [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-03T13:08:02,586 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,40851,1733231279554-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T13:08:02,586 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,40851,1733231279554-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-03T13:08:02,603 DEBUG [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-03T13:08:02,603 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-03T13:08:02,603 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,40851,1733231279554-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
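
The entries above show CreateNamespaceProcedure finishing for the built-in default and hbase namespaces (pid=7, pid=8), after which the master declares initialization complete. The client-facing equivalent for a user namespace is a one-liner; the namespace name below is invented for illustration:

    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CreateUserNamespace {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection();
                 Admin admin = conn.getAdmin()) {
                // Runs a CreateNamespaceProcedure on the master, as logged above.
                admin.createNamespace(NamespaceDescriptor.create("example_ns").build());
            }
        }
    }
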
2024-12-03T13:08:02,658 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x226eecc5 to 127.0.0.1:53253 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6f30988d 2024-12-03T13:08:02,671 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59498341, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T13:08:02,674 DEBUG [hconnection-0xb431317-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T13:08:02,677 INFO [RS-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32776, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T13:08:02,682 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=35946657df09,40851,1733231279554 2024-12-03T13:08:02,683 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T13:08:02,695 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-03T13:08:02,711 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/35946657df09:0 server-side Connection retries=45 2024-12-03T13:08:02,711 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T13:08:02,711 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T13:08:02,711 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T13:08:02,711 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T13:08:02,711 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T13:08:02,711 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T13:08:02,712 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T13:08:02,712 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:45953 2024-12-03T13:08:02,713 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T13:08:02,714 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T13:08:02,716 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T13:08:02,719 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T13:08:02,724 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:45953 connecting to ZooKeeper ensemble=127.0.0.1:53253 2024-12-03T13:08:02,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:459530x0, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T13:08:02,736 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45953-0x1019c9820000003 connected 2024-12-03T13:08:02,737 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:45953-0x1019c9820000003, quorum=127.0.0.1:53253, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-03T13:08:02,739 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:45953-0x1019c9820000003, quorum=127.0.0.1:53253, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-12-03T13:08:02,740 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45953-0x1019c9820000003, quorum=127.0.0.1:53253, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T13:08:02,747 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45953 2024-12-03T13:08:02,747 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45953 2024-12-03T13:08:02,748 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45953 2024-12-03T13:08:02,749 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45953 2024-12-03T13:08:02,749 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45953 2024-12-03T13:08:02,750 DEBUG [pool-282-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-12-03T13:08:02,769 DEBUG [RS:1;35946657df09:45953 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;35946657df09:45953 2024-12-03T13:08:02,770 INFO [RS:1;35946657df09:45953 {}] regionserver.HRegionServer(1008): ClusterId : 229c3d9f-2215-4539-8004-5d0b427619ac 2024-12-03T13:08:02,770 DEBUG [RS:1;35946657df09:45953 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T13:08:02,782 DEBUG [RS:1;35946657df09:45953 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T13:08:02,782 DEBUG [RS:1;35946657df09:45953 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T13:08:02,786 DEBUG [RS:1;35946657df09:45953 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T13:08:02,786 DEBUG [RS:1;35946657df09:45953 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10a964bf, 
compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T13:08:02,787 DEBUG [RS:1;35946657df09:45953 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6359597f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=35946657df09/172.17.0.2:0 2024-12-03T13:08:02,787 INFO [RS:1;35946657df09:45953 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-03T13:08:02,787 INFO [RS:1;35946657df09:45953 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-03T13:08:02,787 DEBUG [RS:1;35946657df09:45953 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-03T13:08:02,788 INFO [RS:1;35946657df09:45953 {}] regionserver.HRegionServer(3073): reportForDuty to master=35946657df09,40851,1733231279554 with isa=35946657df09/172.17.0.2:45953, startcode=1733231282710 2024-12-03T13:08:02,788 DEBUG [RS:1;35946657df09:45953 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T13:08:02,799 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60721, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T13:08:02,803 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40851 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 35946657df09,45953,1733231282710 2024-12-03T13:08:02,804 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40851 {}] master.ServerManager(486): Registering regionserver=35946657df09,45953,1733231282710 2024-12-03T13:08:02,806 DEBUG [RS:1;35946657df09:45953 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f 2024-12-03T13:08:02,806 DEBUG [RS:1;35946657df09:45953 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:42201 2024-12-03T13:08:02,806 DEBUG [RS:1;35946657df09:45953 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-03T13:08:02,815 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-03T13:08:02,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T13:08:02,820 DEBUG [RS:1;35946657df09:45953 {}] zookeeper.ZKUtil(111): regionserver:45953-0x1019c9820000003, quorum=127.0.0.1:53253, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/35946657df09,45953,1733231282710 2024-12-03T13:08:02,820 WARN [RS:1;35946657df09:45953 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
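
The RS:1 thread above is a second region server coming up: it calls reportForDuty against the master, is registered, and then creates its ephemeral znode under /hbase/rs. In a test this is typically driven through the mini-cluster utility; a sketch assuming the HBaseTestingUtility API this log itself references, with the class name invented:

    import org.apache.hadoop.hbase.HBaseTestingUtility;

    public class SecondRegionServer {
        public static void main(String[] args) throws Exception {
            HBaseTestingUtility util = new HBaseTestingUtility();
            util.startMiniCluster(1);   // master + one region server
            // Starts RS:1, which then registers with the master as logged above.
            util.getMiniHBaseCluster().startRegionServer();
            util.shutdownMiniCluster();
        }
    }
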
2024-12-03T13:08:02,820 INFO [RS:1;35946657df09:45953 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T13:08:02,821 DEBUG [RS:1;35946657df09:45953 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,45953,1733231282710 2024-12-03T13:08:02,821 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [35946657df09,45953,1733231282710] 2024-12-03T13:08:02,836 DEBUG [RS:1;35946657df09:45953 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-03T13:08:02,836 INFO [RS:1;35946657df09:45953 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T13:08:02,839 INFO [RS:1;35946657df09:45953 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T13:08:02,843 INFO [RS:1;35946657df09:45953 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T13:08:02,843 INFO [RS:1;35946657df09:45953 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T13:08:02,843 INFO [RS:1;35946657df09:45953 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-03T13:08:02,844 INFO [RS:1;35946657df09:45953 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
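
The MemStoreFlusher and PressureAwareCompactionThroughputController lines above (globalMemStoreLimit=880 M, 100/50 MB/s bounds) correspond to a few server-side keys. A hedged sketch; the 880 M figure is just the default 40% of this JVM's heap (suggesting roughly a 2.2 GB heap), not an explicit setting in the test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class FlushAndThroughputLimits {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Fraction of heap usable by all memstores combined.
            conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        }
    }
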
2024-12-03T13:08:02,845 DEBUG [RS:1;35946657df09:45953 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1 2024-12-03T13:08:02,845 DEBUG [RS:1;35946657df09:45953 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1 2024-12-03T13:08:02,845 DEBUG [RS:1;35946657df09:45953 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1 2024-12-03T13:08:02,845 DEBUG [RS:1;35946657df09:45953 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1 2024-12-03T13:08:02,845 DEBUG [RS:1;35946657df09:45953 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1 2024-12-03T13:08:02,845 DEBUG [RS:1;35946657df09:45953 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/35946657df09:0, corePoolSize=2, maxPoolSize=2 2024-12-03T13:08:02,845 DEBUG [RS:1;35946657df09:45953 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1 2024-12-03T13:08:02,845 DEBUG [RS:1;35946657df09:45953 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1 2024-12-03T13:08:02,845 DEBUG [RS:1;35946657df09:45953 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1 2024-12-03T13:08:02,845 DEBUG [RS:1;35946657df09:45953 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1 2024-12-03T13:08:02,846 DEBUG [RS:1;35946657df09:45953 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1 2024-12-03T13:08:02,846 DEBUG [RS:1;35946657df09:45953 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/35946657df09:0, corePoolSize=3, maxPoolSize=3 2024-12-03T13:08:02,846 DEBUG [RS:1;35946657df09:45953 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/35946657df09:0, corePoolSize=3, maxPoolSize=3 2024-12-03T13:08:02,851 INFO [RS:1;35946657df09:45953 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T13:08:02,852 INFO [RS:1;35946657df09:45953 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T13:08:02,852 INFO [RS:1;35946657df09:45953 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T13:08:02,852 INFO [RS:1;35946657df09:45953 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T13:08:02,852 INFO [RS:1;35946657df09:45953 {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,45953,1733231282710-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-03T13:08:02,871 INFO [RS:1;35946657df09:45953 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T13:08:02,871 INFO [RS:1;35946657df09:45953 {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,45953,1733231282710-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T13:08:02,894 INFO [RS:1;35946657df09:45953 {}] regionserver.Replication(204): 35946657df09,45953,1733231282710 started 2024-12-03T13:08:02,895 INFO [RS:1;35946657df09:45953 {}] regionserver.HRegionServer(1767): Serving as 35946657df09,45953,1733231282710, RpcServer on 35946657df09/172.17.0.2:45953, sessionid=0x1019c9820000003 2024-12-03T13:08:02,895 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3355): Started new server=Thread[RS:1;35946657df09:45953,5,FailOnTimeoutGroup] 2024-12-03T13:08:02,895 DEBUG [RS:1;35946657df09:45953 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T13:08:02,895 DEBUG [RS:1;35946657df09:45953 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 35946657df09,45953,1733231282710 2024-12-03T13:08:02,895 INFO [Time-limited test {}] wal.TestLogRolling(191): Replication=2 2024-12-03T13:08:02,895 DEBUG [RS:1;35946657df09:45953 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '35946657df09,45953,1733231282710' 2024-12-03T13:08:02,895 DEBUG [RS:1;35946657df09:45953 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T13:08:02,896 DEBUG [RS:1;35946657df09:45953 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T13:08:02,896 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-03T13:08:02,896 DEBUG [RS:1;35946657df09:45953 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T13:08:02,896 DEBUG [RS:1;35946657df09:45953 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T13:08:02,896 DEBUG [RS:1;35946657df09:45953 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 35946657df09,45953,1733231282710 2024-12-03T13:08:02,897 DEBUG [RS:1;35946657df09:45953 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '35946657df09,45953,1733231282710' 2024-12-03T13:08:02,897 DEBUG [RS:1;35946657df09:45953 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T13:08:02,898 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37318, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-03T13:08:02,900 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40851 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-03T13:08:02,900 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40851 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
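
The two TableDescriptorChecker warnings above fire because the test deliberately shrinks MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) to force frequent splits and flushes. A sketch of a descriptor that would trigger the same warnings; the table and family names are taken from the log, the rest is illustrative:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class TinyRollDescriptor {
        public static void main(String[] args) {
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                .setMaxFileSize(786432)      // triggers the MAX_FILESIZE warning
                .setMemStoreFlushSize(8192)  // triggers the MEMSTORE_FLUSHSIZE warning
                .build();
            System.out.println(td);
        }
    }
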
2024-12-03T13:08:02,900 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40851 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T13:08:02,900 DEBUG [RS:1;35946657df09:45953 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T13:08:02,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40851 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-12-03T13:08:02,903 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T13:08:02,903 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T13:08:02,903 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40851 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 9 2024-12-03T13:08:02,903 DEBUG [RS:1;35946657df09:45953 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T13:08:02,904 INFO [RS:1;35946657df09:45953 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T13:08:02,905 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T13:08:02,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40851 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-03T13:08:02,909 INFO [RS:1;35946657df09:45953 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
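
Quota support is reported disabled twice above (RPC quotas and space quotas). Enabling it is a single boolean, assuming the stock key name; shown only for contrast with the logged default:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class EnableQuotas {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setBoolean("hbase.quota.enabled", true); // default is false, as logged
        }
    }
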
2024-12-03T13:08:02,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41871 is added to blk_1073741837_1013 (size=393) 2024-12-03T13:08:02,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38443 is added to blk_1073741837_1013 (size=393) 2024-12-03T13:08:02,934 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 1d703b8dd1a7ffa0786db6127714a68b, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733231282899.1d703b8dd1a7ffa0786db6127714a68b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f 2024-12-03T13:08:02,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38443 is added to blk_1073741838_1014 (size=76) 2024-12-03T13:08:02,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41871 is added to blk_1073741838_1014 (size=76) 2024-12-03T13:08:02,958 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733231282899.1d703b8dd1a7ffa0786db6127714a68b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T13:08:02,958 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1681): Closing 1d703b8dd1a7ffa0786db6127714a68b, disabling compactions & flushes 2024-12-03T13:08:02,958 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733231282899.1d703b8dd1a7ffa0786db6127714a68b. 2024-12-03T13:08:02,958 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733231282899.1d703b8dd1a7ffa0786db6127714a68b. 2024-12-03T13:08:02,958 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733231282899.1d703b8dd1a7ffa0786db6127714a68b. after waiting 0 ms 2024-12-03T13:08:02,958 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733231282899.1d703b8dd1a7ffa0786db6127714a68b. 2024-12-03T13:08:02,958 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733231282899.1d703b8dd1a7ffa0786db6127714a68b. 
2024-12-03T13:08:02,958 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1635): Region close journal for 1d703b8dd1a7ffa0786db6127714a68b: 2024-12-03T13:08:02,960 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T13:08:02,960 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1733231282899.1d703b8dd1a7ffa0786db6127714a68b.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733231282960"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733231282960"}]},"ts":"1733231282960"} 2024-12-03T13:08:02,963 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-03T13:08:02,966 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T13:08:02,966 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733231282966"}]},"ts":"1733231282966"} 2024-12-03T13:08:02,979 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-12-03T13:08:03,011 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=1d703b8dd1a7ffa0786db6127714a68b, ASSIGN}] 2024-12-03T13:08:03,020 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=1d703b8dd1a7ffa0786db6127714a68b, ASSIGN 2024-12-03T13:08:03,022 INFO [RS:1;35946657df09:45953 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=35946657df09%2C45953%2C1733231282710, suffix=, logDir=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,45953,1733231282710, archiveDir=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/oldWALs, maxLogs=32 2024-12-03T13:08:03,025 INFO [RS:1;35946657df09:45953 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C45953%2C1733231282710.1733231283024 2024-12-03T13:08:03,031 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=1d703b8dd1a7ffa0786db6127714a68b, ASSIGN; state=OFFLINE, location=35946657df09,42823,1733231279724; forceNewPlan=false, retain=false 2024-12-03T13:08:03,069 INFO [RS:1;35946657df09:45953 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,45953,1733231282710/35946657df09%2C45953%2C1733231282710.1733231283024 2024-12-03T13:08:03,069 DEBUG [RS:1;35946657df09:45953 {}] wal.AbstractFSWAL(925): 
Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41005:41005),(127.0.0.1/127.0.0.1:43607:43607)] 2024-12-03T13:08:03,183 INFO [35946657df09:40851 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-03T13:08:03,183 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=1d703b8dd1a7ffa0786db6127714a68b, regionState=OPENING, regionLocation=35946657df09,42823,1733231279724 2024-12-03T13:08:03,186 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 1d703b8dd1a7ffa0786db6127714a68b, server=35946657df09,42823,1733231279724}] 2024-12-03T13:08:03,340 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 35946657df09,42823,1733231279724 2024-12-03T13:08:03,346 INFO [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRollOnDatanodeDeath,,1733231282899.1d703b8dd1a7ffa0786db6127714a68b. 2024-12-03T13:08:03,346 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 1d703b8dd1a7ffa0786db6127714a68b, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733231282899.1d703b8dd1a7ffa0786db6127714a68b.', STARTKEY => '', ENDKEY => ''} 2024-12-03T13:08:03,347 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 1d703b8dd1a7ffa0786db6127714a68b 2024-12-03T13:08:03,347 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733231282899.1d703b8dd1a7ffa0786db6127714a68b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T13:08:03,347 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 1d703b8dd1a7ffa0786db6127714a68b 2024-12-03T13:08:03,347 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 1d703b8dd1a7ffa0786db6127714a68b 2024-12-03T13:08:03,359 INFO [StoreOpener-1d703b8dd1a7ffa0786db6127714a68b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1d703b8dd1a7ffa0786db6127714a68b 2024-12-03T13:08:03,365 INFO [StoreOpener-1d703b8dd1a7ffa0786db6127714a68b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1d703b8dd1a7ffa0786db6127714a68b columnFamilyName info 2024-12-03T13:08:03,365 DEBUG [StoreOpener-1d703b8dd1a7ffa0786db6127714a68b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T13:08:03,366 INFO [StoreOpener-1d703b8dd1a7ffa0786db6127714a68b-1 {}] regionserver.HStore(327): Store=1d703b8dd1a7ffa0786db6127714a68b/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T13:08:03,371 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1d703b8dd1a7ffa0786db6127714a68b 2024-12-03T13:08:03,372 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1d703b8dd1a7ffa0786db6127714a68b 2024-12-03T13:08:03,382 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 1d703b8dd1a7ffa0786db6127714a68b 2024-12-03T13:08:03,386 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1d703b8dd1a7ffa0786db6127714a68b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T13:08:03,387 INFO [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 1d703b8dd1a7ffa0786db6127714a68b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=848846, jitterRate=0.07936383783817291}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T13:08:03,388 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 1d703b8dd1a7ffa0786db6127714a68b: 2024-12-03T13:08:03,395 INFO [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1733231282899.1d703b8dd1a7ffa0786db6127714a68b., pid=11, masterSystemTime=1733231283340 2024-12-03T13:08:03,398 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1733231282899.1d703b8dd1a7ffa0786db6127714a68b. 2024-12-03T13:08:03,399 INFO [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1733231282899.1d703b8dd1a7ffa0786db6127714a68b. 
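The WAL settings logged at 13:08:03,022 (blocksize=256 MB, rollsize=128 MB, maxLogs=32) follow rollsize = blocksize × roll multiplier. A sketch of the configuration that yields those numbers, assuming the stock key names and the default 0.5 multiplier:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollSizing {
      public static Configuration walRollConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // 256 MB WAL blocks
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // roll at 50% => 128 MB
        conf.setInt("hbase.regionserver.maxlogs", 32);                         // matches maxLogs=32
        return conf;
      }
    }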
2024-12-03T13:08:03,399 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=1d703b8dd1a7ffa0786db6127714a68b, regionState=OPEN, openSeqNum=2, regionLocation=35946657df09,42823,1733231279724 2024-12-03T13:08:03,420 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-03T13:08:03,421 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 1d703b8dd1a7ffa0786db6127714a68b, server=35946657df09,42823,1733231279724 in 223 msec 2024-12-03T13:08:03,423 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-03T13:08:03,423 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=1d703b8dd1a7ffa0786db6127714a68b, ASSIGN in 409 msec 2024-12-03T13:08:03,424 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T13:08:03,424 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733231283424"}]},"ts":"1733231283424"} 2024-12-03T13:08:03,426 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-12-03T13:08:03,462 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T13:08:03,464 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 562 msec 2024-12-03T13:08:06,244 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-03T13:08:06,245 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-03T13:08:06,246 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-12-03T13:08:07,716 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T13:08:07,719 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:08:07,762 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:08:12,813 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-03T13:08:12,813 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-12-03T13:08:12,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40851 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-03T13:08:12,906 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath, procId: 9 completed 2024-12-03T13:08:12,910 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-12-03T13:08:12,910 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1733231282899.1d703b8dd1a7ffa0786db6127714a68b. 2024-12-03T13:08:12,932 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T13:08:12,943 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T13:08:12,960 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T13:08:12,960 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T13:08:12,960 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T13:08:12,962 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e1dd1d3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/hadoop.log.dir/,AVAILABLE} 2024-12-03T13:08:12,963 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10ca804b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T13:08:13,112 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3ece3dae{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/java.io.tmpdir/jetty-localhost-33075-hadoop-hdfs-3_4_1-tests_jar-_-any-13140195191986468794/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T13:08:13,113 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3884b926{HTTP/1.1, (http/1.1)}{localhost:33075} 2024-12-03T13:08:13,113 INFO [Time-limited test {}] server.Server(415): Started @138878ms 2024-12-03T13:08:13,115 WARN 
[Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T13:08:13,238 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T13:08:13,251 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T13:08:13,263 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T13:08:13,263 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T13:08:13,263 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T13:08:13,267 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69840c2d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/hadoop.log.dir/,AVAILABLE} 2024-12-03T13:08:13,268 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5f0f8812{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T13:08:13,397 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@712dd7d7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/java.io.tmpdir/jetty-localhost-42735-hadoop-hdfs-3_4_1-tests_jar-_-any-6976047784544091606/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T13:08:13,397 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5a185276{HTTP/1.1, (http/1.1)}{localhost:42735} 2024-12-03T13:08:13,397 INFO [Time-limited test {}] server.Server(415): Started @139162ms 2024-12-03T13:08:13,399 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T13:08:13,456 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T13:08:13,466 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T13:08:13,468 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T13:08:13,468 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T13:08:13,469 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T13:08:13,469 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@526b87a6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/hadoop.log.dir/,AVAILABLE} 2024-12-03T13:08:13,469 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5c620838{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T13:08:13,592 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1794f7a3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/java.io.tmpdir/jetty-localhost-46323-hadoop-hdfs-3_4_1-tests_jar-_-any-5401369045700360172/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T13:08:13,593 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3bcbf20d{HTTP/1.1, (http/1.1)}{localhost:46323} 2024-12-03T13:08:13,593 INFO [Time-limited test {}] server.Server(415): Started @139358ms 2024-12-03T13:08:13,595 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T13:08:13,819 WARN [Thread-669 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data5/current/BP-1901943026-172.17.0.2-1733231277769/current, will proceed with Du for space computation calculation, 2024-12-03T13:08:13,819 WARN [Thread-670 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data6/current/BP-1901943026-172.17.0.2-1733231277769/current, will proceed with Du for space computation calculation, 2024-12-03T13:08:13,860 WARN [Thread-613 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T13:08:13,865 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x243b34e3c7afc55a with lease ID 0x147fe8b21f88577c: Processing first storage report for DS-790ef67b-1517-47df-a8ec-7b49ccbc92f8 from datanode DatanodeRegistration(127.0.0.1:41515, datanodeUuid=991a1b7e-cb01-4d7a-bd53-a03c488bce89, infoPort=33439, infoSecurePort=0, ipcPort=39031, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769) 2024-12-03T13:08:13,865 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x243b34e3c7afc55a with lease ID 0x147fe8b21f88577c: from storage DS-790ef67b-1517-47df-a8ec-7b49ccbc92f8 node DatanodeRegistration(127.0.0.1:41515, datanodeUuid=991a1b7e-cb01-4d7a-bd53-a03c488bce89, infoPort=33439, infoSecurePort=0, ipcPort=39031, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T13:08:13,866 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x243b34e3c7afc55a with lease ID 0x147fe8b21f88577c: Processing first storage report for DS-1bc18c28-10ac-4069-8798-dfb24d2f52be from datanode DatanodeRegistration(127.0.0.1:41515, datanodeUuid=991a1b7e-cb01-4d7a-bd53-a03c488bce89, infoPort=33439, infoSecurePort=0, ipcPort=39031, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769) 2024-12-03T13:08:13,866 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x243b34e3c7afc55a with lease ID 0x147fe8b21f88577c: from storage DS-1bc18c28-10ac-4069-8798-dfb24d2f52be node DatanodeRegistration(127.0.0.1:41515, datanodeUuid=991a1b7e-cb01-4d7a-bd53-a03c488bce89, infoPort=33439, infoSecurePort=0, ipcPort=39031, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T13:08:14,255 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data8/current/BP-1901943026-172.17.0.2-1733231277769/current, will proceed with Du for space computation calculation, 2024-12-03T13:08:14,259 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data7/current/BP-1901943026-172.17.0.2-1733231277769/current, will proceed with Du for space computation calculation, 2024-12-03T13:08:14,300 WARN [Thread-636 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T13:08:14,304 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3148a1b0072da972 with lease ID 0x147fe8b21f88577d: Processing first storage report for DS-89a478a0-4dae-4c1e-a5d3-2ed429e7fdf2 from datanode DatanodeRegistration(127.0.0.1:40745, datanodeUuid=734898f3-0c66-4004-a8e7-e7569f23e3e9, infoPort=35313, infoSecurePort=0, ipcPort=44411, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769) 2024-12-03T13:08:14,304 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3148a1b0072da972 with lease ID 0x147fe8b21f88577d: from storage DS-89a478a0-4dae-4c1e-a5d3-2ed429e7fdf2 node DatanodeRegistration(127.0.0.1:40745, datanodeUuid=734898f3-0c66-4004-a8e7-e7569f23e3e9, infoPort=35313, infoSecurePort=0, ipcPort=44411, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-03T13:08:14,304 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3148a1b0072da972 with lease ID 0x147fe8b21f88577d: Processing first storage report for DS-dbf84caf-6ee7-4a40-819d-f9f5b52f5eb5 from datanode DatanodeRegistration(127.0.0.1:40745, datanodeUuid=734898f3-0c66-4004-a8e7-e7569f23e3e9, infoPort=35313, infoSecurePort=0, ipcPort=44411, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769) 2024-12-03T13:08:14,305 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3148a1b0072da972 with lease ID 0x147fe8b21f88577d: from storage DS-dbf84caf-6ee7-4a40-819d-f9f5b52f5eb5 node DatanodeRegistration(127.0.0.1:40745, datanodeUuid=734898f3-0c66-4004-a8e7-e7569f23e3e9, infoPort=35313, infoSecurePort=0, ipcPort=44411, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T13:08:14,398 WARN [Thread-694 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data9/current/BP-1901943026-172.17.0.2-1733231277769/current, will proceed with Du for space computation calculation, 2024-12-03T13:08:14,399 WARN [Thread-695 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data10/current/BP-1901943026-172.17.0.2-1733231277769/current, will proceed with Du for space computation calculation, 2024-12-03T13:08:14,419 WARN [Thread-659 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T13:08:14,422 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x50c9d418692ed039 with lease ID 0x147fe8b21f88577e: Processing first storage report for DS-db51cff4-b0ae-46ba-9514-33bac32e054f from datanode DatanodeRegistration(127.0.0.1:35879, datanodeUuid=4d252050-9295-42d4-bd01-2e09556fe966, infoPort=43547, infoSecurePort=0, ipcPort=41219, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769) 2024-12-03T13:08:14,422 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x50c9d418692ed039 with lease ID 0x147fe8b21f88577e: from storage DS-db51cff4-b0ae-46ba-9514-33bac32e054f node DatanodeRegistration(127.0.0.1:35879, datanodeUuid=4d252050-9295-42d4-bd01-2e09556fe966, infoPort=43547, infoSecurePort=0, ipcPort=41219, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T13:08:14,422 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x50c9d418692ed039 with lease ID 0x147fe8b21f88577e: Processing first storage report for DS-b3c1be95-582e-4639-96af-16d05aa20e59 from datanode DatanodeRegistration(127.0.0.1:35879, datanodeUuid=4d252050-9295-42d4-bd01-2e09556fe966, infoPort=43547, infoSecurePort=0, ipcPort=41219, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769) 2024-12-03T13:08:14,422 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x50c9d418692ed039 with lease ID 0x147fe8b21f88577e: from storage DS-b3c1be95-582e-4639-96af-16d05aa20e59 node DatanodeRegistration(127.0.0.1:35879, datanodeUuid=4d252050-9295-42d4-bd01-2e09556fe966, infoPort=43547, infoSecurePort=0, ipcPort=41219, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T13:08:14,497 WARN [ResponseProcessor for block BP-1901943026-172.17.0.2-1733231277769:blk_1073741839_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1901943026-172.17.0.2-1733231277769:blk_1073741839_1015 java.io.IOException: Bad response ERROR for BP-1901943026-172.17.0.2-1733231277769:blk_1073741839_1015 from datanode DatanodeInfoWithStorage[127.0.0.1:38443,DS-2cd4f562-9f9a-4adf-a1fa-e65d21cedc2f,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T13:08:14,496 WARN [ResponseProcessor for block BP-1901943026-172.17.0.2-1733231277769:blk_1073741832_1008 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1901943026-172.17.0.2-1733231277769:blk_1073741832_1008 java.io.IOException: Bad response ERROR for BP-1901943026-172.17.0.2-1733231277769:blk_1073741832_1008 from datanode DatanodeInfoWithStorage[127.0.0.1:38443,DS-2cd4f562-9f9a-4adf-a1fa-e65d21cedc2f,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T13:08:14,498 WARN [ResponseProcessor for block BP-1901943026-172.17.0.2-1733231277769:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1901943026-172.17.0.2-1733231277769:blk_1073741830_1006
java.io.IOException: Bad response ERROR for BP-1901943026-172.17.0.2-1733231277769:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:38443,DS-2cd4f562-9f9a-4adf-a1fa-e65d21cedc2f,DISK]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:08:14,501 WARN [DataStreamer for file /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,45953,1733231282710/35946657df09%2C45953%2C1733231282710.1733231283024 block BP-1901943026-172.17.0.2-1733231277769:blk_1073741839_1015 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1901943026-172.17.0.2-1733231277769:blk_1073741839_1015 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK], DatanodeInfoWithStorage[127.0.0.1:38443,DS-2cd4f562-9f9a-4adf-a1fa-e65d21cedc2f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38443,DS-2cd4f562-9f9a-4adf-a1fa-e65d21cedc2f,DISK]) is bad.
2024-12-03T13:08:14,501 WARN [DataStreamer for file /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 block BP-1901943026-172.17.0.2-1733231277769:blk_1073741832_1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1901943026-172.17.0.2-1733231277769:blk_1073741832_1008 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK], DatanodeInfoWithStorage[127.0.0.1:38443,DS-2cd4f562-9f9a-4adf-a1fa-e65d21cedc2f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38443,DS-2cd4f562-9f9a-4adf-a1fa-e65d21cedc2f,DISK]) is bad.
2024-12-03T13:08:14,502 WARN [DataStreamer for file /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 block BP-1901943026-172.17.0.2-1733231277769:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1901943026-172.17.0.2-1733231277769:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK], DatanodeInfoWithStorage[127.0.0.1:38443,DS-2cd4f562-9f9a-4adf-a1fa-e65d21cedc2f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38443,DS-2cd4f562-9f9a-4adf-a1fa-e65d21cedc2f,DISK]) is bad.
2024-12-03T13:08:14,496 WARN [ResponseProcessor for block BP-1901943026-172.17.0.2-1733231277769:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1901943026-172.17.0.2-1733231277769:blk_1073741834_1010
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
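The burst of ResponseProcessor and DataStreamer warnings starting at 13:08:14,496 is exactly the signature this test is built to produce: datanode 127.0.0.1:38443 has gone away while several WAL and MasterData block pipelines were open, so each DFSClient marks that replica bad and runs pipeline error recovery. A sketch of how a test typically induces this against the mini cluster; the datanode index is illustrative:

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class KillDatanodeSketch {
      static void killOneDatanode(HBaseTestingUtility testUtil) throws Exception {
        MiniDFSCluster dfs = testUtil.getDFSCluster();
        // Stop one datanode while WALs are open; client pipelines containing it
        // will log "Bad response ERROR ... is bad" and start error recovery.
        MiniDFSCluster.DataNodeProperties dn = dfs.stopDataNode(0);
        // ... write more edits, assert the WAL rolled onto healthy nodes ...
        dfs.restartDataNode(dn, true); // optionally bring it back on the same port
      }
    }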
2024-12-03T13:08:14,501 WARN [PacketResponder: BP-1901943026-172.17.0.2-1733231277769:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:38443] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:14,503 WARN [PacketResponder: BP-1901943026-172.17.0.2-1733231277769:blk_1073741832_1008, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:38443] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:14,504 WARN [DataStreamer for file /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta block BP-1901943026-172.17.0.2-1733231277769:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1901943026-172.17.0.2-1733231277769:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38443,DS-2cd4f562-9f9a-4adf-a1fa-e65d21cedc2f,DISK], DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38443,DS-2cd4f562-9f9a-4adf-a1fa-e65d21cedc2f,DISK]) is bad. 2024-12-03T13:08:14,504 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:37232 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741832_1008] {}] datanode.DataXceiver(331): 127.0.0.1:41871:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37232 dst: /127.0.0.1:41871 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:14,504 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1841777198_22 at /127.0.0.1:37206 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41871:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37206 dst: /127.0.0.1:41871 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:14,504 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1841777198_22 at /127.0.0.1:33452 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:38443:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33452 dst: /127.0.0.1:38443 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
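The Premature EOF and ClosedChannelException DataXceiver errors above are the datanode-side view of the same broken pipelines. Once recovery settles, a roll can also be forced from the client so subsequent edits go into a fresh WAL file on healthy nodes; a hedged sketch using the admin API, with the server name copied from the WAL path in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.*;

    public class ForceWalRoll {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask the region server from the log ("host,port,startcode") to open
          // a fresh WAL file; the new writer pipeline excludes the dead datanode.
          admin.rollWALWriter(ServerName.valueOf("35946657df09,45953,1733231282710"));
        }
      }
    }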
2024-12-03T13:08:14,505 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:33470 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741832_1008] {}] datanode.DataXceiver(331): 127.0.0.1:38443:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33470 dst: /127.0.0.1:38443 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:14,505 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:37244 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41871:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37244 dst: /127.0.0.1:41871 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:14,505 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:33488 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:38443:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33488 dst: /127.0.0.1:38443 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:14,506 WARN [PacketResponder: BP-1901943026-172.17.0.2-1733231277769:blk_1073741839_1015, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:38443] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] 
at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:14,507 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-620010175_22 at /127.0.0.1:37282 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741839_1015] {}] datanode.DataXceiver(331): 127.0.0.1:41871:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37282 dst: /127.0.0.1:41871 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:14,507 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-620010175_22 at /127.0.0.1:33534 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741839_1015] {}] datanode.DataXceiver(331): 127.0.0.1:38443:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33534 dst: /127.0.0.1:38443 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] 
at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
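Whether the writer adds a replacement datanode to a degraded pipeline or keeps writing with the survivors is governed by the HDFS client's replace-datanode-on-failure settings. A sketch using the stock keys; the values shown are common test choices, not values read from this run:

    import org.apache.hadoop.conf.Configuration;

    public class PipelineReplacePolicy {
      public static Configuration clientConf() {
        Configuration conf = new Configuration();
        // Try to add a replacement node on pipeline failure ("DEFAULT" policy
        // only does so for sufficiently replicated/long-lived files), and fail
        // the write rather than silently continue if replacement itself fails.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", false);
        return conf;
      }
    }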
2024-12-03T13:08:14,536 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4b765977{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T13:08:14,536 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@50ff44c9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T13:08:14,536 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T13:08:14,537 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@19d7702a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T13:08:14,537 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@623245d7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/hadoop.log.dir/,STOPPED} 2024-12-03T13:08:14,548 WARN [BP-1901943026-172.17.0.2-1733231277769 heartbeating to localhost/127.0.0.1:42201 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T13:08:14,548 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-03T13:08:14,548 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T13:08:14,548 WARN [BP-1901943026-172.17.0.2-1733231277769 heartbeating to localhost/127.0.0.1:42201 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1901943026-172.17.0.2-1733231277769 (Datanode Uuid bb5ffb4e-0b04-4306-9aa6-d2b870e4faf7) service to localhost/127.0.0.1:42201 2024-12-03T13:08:14,548 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data3/current/BP-1901943026-172.17.0.2-1733231277769 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T13:08:14,549 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data4/current/BP-1901943026-172.17.0.2-1733231277769 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T13:08:14,549 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T13:08:14,550 WARN [ResponseProcessor for block BP-1901943026-172.17.0.2-1733231277769:blk_1073741834_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1901943026-172.17.0.2-1733231277769:blk_1073741834_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:08:14,551 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:36692 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41871:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36692 dst: /127.0.0.1:41871
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T13:08:14,551 WARN [ResponseProcessor for block BP-1901943026-172.17.0.2-1733231277769:blk_1073741839_1019 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1901943026-172.17.0.2-1733231277769:blk_1073741839_1019
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:08:14,557 WARN [ResponseProcessor for block BP-1901943026-172.17.0.2-1733231277769:blk_1073741830_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1901943026-172.17.0.2-1733231277769:blk_1073741830_1018
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:08:14,561 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1841777198_22 at /127.0.0.1:36696 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41871:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36696 dst: /127.0.0.1:41871
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T13:08:14,568 WARN [ResponseProcessor for block BP-1901943026-172.17.0.2-1733231277769:blk_1073741832_1017 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1901943026-172.17.0.2-1733231277769:blk_1073741832_1017
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:08:14,569 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:36712 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741832_1008] {}] datanode.DataXceiver(331): 127.0.0.1:41871:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36712 dst: /127.0.0.1:41871
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T13:08:14,577 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-620010175_22 at /127.0.0.1:36716 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741839_1015] {}] datanode.DataXceiver(331): 127.0.0.1:41871:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36716 dst: /127.0.0.1:41871
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T13:08:14,584 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@469f194a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T13:08:14,587 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@34592b2d{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-03T13:08:14,587 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-03T13:08:14,590 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c443c4f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-03T13:08:14,591 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@439eb1c5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/hadoop.log.dir/,STOPPED}
2024-12-03T13:08:14,600 WARN [BP-1901943026-172.17.0.2-1733231277769 heartbeating to localhost/127.0.0.1:42201 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-03T13:08:14,600 WARN [BP-1901943026-172.17.0.2-1733231277769 heartbeating to localhost/127.0.0.1:42201 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1901943026-172.17.0.2-1733231277769 (Datanode Uuid 6714c1e8-3a43-4bf4-9fc7-149c84e2fc1d) service to localhost/127.0.0.1:42201
2024-12-03T13:08:14,601 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-03T13:08:14,601 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-03T13:08:14,602 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data2/current/BP-1901943026-172.17.0.2-1733231277769 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T13:08:14,602 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data1/current/BP-1901943026-172.17.0.2-1733231277769 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T13:08:14,602 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-03T13:08:14,621 WARN [RS:0;35946657df09:42823.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=4, requesting roll of WAL
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:08:14,622 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 35946657df09%2C42823%2C1733231279724:(num 1733231280409) roll requested
2024-12-03T13:08:14,622 INFO [regionserver/35946657df09:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C42823%2C1733231279724.1733231294622
2024-12-03T13:08:14,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42823 {}] ipc.MetricsHBaseServer(152): Unknown exception type
org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=4, requesting roll of WAL
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?]
    at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?]
    at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:08:14,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42823 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:32776 deadline: 1733231304614, exception=org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=4, requesting roll of WAL
2024-12-03T13:08:14,643 WARN [Thread-717 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1020
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:08:14,643 WARN [Thread-717 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1901943026-172.17.0.2-1733231277769:blk_1073741840_1020 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38443,DS-2cd4f562-9f9a-4adf-a1fa-e65d21cedc2f,DISK], DatanodeInfoWithStorage[127.0.0.1:35879,DS-db51cff4-b0ae-46ba-9514-33bac32e054f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38443,DS-2cd4f562-9f9a-4adf-a1fa-e65d21cedc2f,DISK]) is bad.
2024-12-03T13:08:14,643 WARN [Thread-717 {}] hdfs.DataStreamer(1850): Abandoning BP-1901943026-172.17.0.2-1733231277769:blk_1073741840_1020
2024-12-03T13:08:14,655 WARN [Thread-717 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38443,DS-2cd4f562-9f9a-4adf-a1fa-e65d21cedc2f,DISK]
2024-12-03T13:08:14,676 WARN [regionserver/35946657df09:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WAL
org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=4, requesting roll of WAL
2024-12-03T13:08:14,676 INFO [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 with entries=4, filesize=959 B; new WAL /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231294622
2024-12-03T13:08:14,677 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35313:35313),(127.0.0.1/127.0.0.1:43547:43547)]
2024-12-03T13:08:14,677 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 is not closed yet, will try archiving it next time
2024-12-03T13:08:14,677 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:08:14,678 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:08:14,679 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils
2024-12-03T13:08:14,679 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease()
2024-12-03T13:08:14,679 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
2024-12-03T13:08:14,683 WARN [IPC Server handler 1 on default port 42201 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741832_1017
2024-12-03T13:08:14,687 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 after 8ms
2024-12-03T13:08:18,688 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 after 4009ms
2024-12-03T13:08:26,691 INFO [Time-limited test {}] wal.TestLogRolling(243): log.getCurrentFileName(): hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231294622
2024-12-03T13:08:26,692 WARN [ResponseProcessor for block BP-1901943026-172.17.0.2-1733231277769:blk_1073741841_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1901943026-172.17.0.2-1733231277769:blk_1073741841_1021
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:08:26,692 WARN [DataStreamer for file /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231294622 block BP-1901943026-172.17.0.2-1733231277769:blk_1073741841_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1901943026-172.17.0.2-1733231277769:blk_1073741841_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40745,DS-89a478a0-4dae-4c1e-a5d3-2ed429e7fdf2,DISK], DatanodeInfoWithStorage[127.0.0.1:35879,DS-db51cff4-b0ae-46ba-9514-33bac32e054f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40745,DS-89a478a0-4dae-4c1e-a5d3-2ed429e7fdf2,DISK]) is bad.
2024-12-03T13:08:26,692 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:43138 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741841_1021] {}] datanode.DataXceiver(331): 127.0.0.1:40745:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43138 dst: /127.0.0.1:40745
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T13:08:26,693 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:33816 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741841_1021] {}] datanode.DataXceiver(331): 127.0.0.1:35879:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33816 dst: /127.0.0.1:35879
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T13:08:26,694 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@712dd7d7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T13:08:26,695 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5a185276{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-03T13:08:26,695 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-03T13:08:26,696 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5f0f8812{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-03T13:08:26,696 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69840c2d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/hadoop.log.dir/,STOPPED}
2024-12-03T13:08:26,698 WARN [BP-1901943026-172.17.0.2-1733231277769 heartbeating to localhost/127.0.0.1:42201 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-03T13:08:26,698 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-03T13:08:26,698 WARN [BP-1901943026-172.17.0.2-1733231277769 heartbeating to localhost/127.0.0.1:42201 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1901943026-172.17.0.2-1733231277769 (Datanode Uuid 734898f3-0c66-4004-a8e7-e7569f23e3e9) service to localhost/127.0.0.1:42201
2024-12-03T13:08:26,698 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-03T13:08:26,698 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data7/current/BP-1901943026-172.17.0.2-1733231277769 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T13:08:26,698 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data8/current/BP-1901943026-172.17.0.2-1733231277769 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T13:08:26,699 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-03T13:08:26,702 WARN [sync.1 {}] wal.FSHLog(750): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35879,DS-db51cff4-b0ae-46ba-9514-33bac32e054f,DISK]]
2024-12-03T13:08:26,702 WARN [sync.1 {}] wal.FSHLog(721): Requesting log roll because of low replication, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35879,DS-db51cff4-b0ae-46ba-9514-33bac32e054f,DISK]]
2024-12-03T13:08:26,702 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 35946657df09%2C42823%2C1733231279724:(num 1733231294622) roll requested
2024-12-03T13:08:26,702 INFO [regionserver/35946657df09:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C42823%2C1733231279724.1733231306702
2024-12-03T13:08:26,706 WARN [Thread-726 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1024
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40745
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:08:26,706 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:47534 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741842_1024] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data10]'}, localName='127.0.0.1:35879', datanodeUuid='4d252050-9295-42d4-bd01-2e09556fe966', xmitsInProgress=0}:Exception transferring block BP-1901943026-172.17.0.2-1733231277769:blk_1073741842_1024 to mirror 127.0.0.1:40745
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T13:08:26,707 WARN [Thread-726 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1901943026-172.17.0.2-1733231277769:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35879,DS-db51cff4-b0ae-46ba-9514-33bac32e054f,DISK], DatanodeInfoWithStorage[127.0.0.1:40745,DS-89a478a0-4dae-4c1e-a5d3-2ed429e7fdf2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40745,DS-89a478a0-4dae-4c1e-a5d3-2ed429e7fdf2,DISK]) is bad.
2024-12-03T13:08:26,707 WARN [Thread-726 {}] hdfs.DataStreamer(1850): Abandoning BP-1901943026-172.17.0.2-1733231277769:blk_1073741842_1024
2024-12-03T13:08:26,707 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:47534 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741842_1024] {}] datanode.BlockReceiver(316): Block 1073741842 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-12-03T13:08:26,707 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:47534 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:35879:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47534 dst: /127.0.0.1:35879
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T13:08:26,707 WARN [Thread-726 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40745,DS-89a478a0-4dae-4c1e-a5d3-2ed429e7fdf2,DISK]
2024-12-03T13:08:26,709 WARN [Thread-726 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1025
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:08:26,709 WARN [Thread-726 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1901943026-172.17.0.2-1733231277769:blk_1073741843_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38443,DS-2cd4f562-9f9a-4adf-a1fa-e65d21cedc2f,DISK], DatanodeInfoWithStorage[127.0.0.1:41515,DS-790ef67b-1517-47df-a8ec-7b49ccbc92f8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38443,DS-2cd4f562-9f9a-4adf-a1fa-e65d21cedc2f,DISK]) is bad.
2024-12-03T13:08:26,709 WARN [Thread-726 {}] hdfs.DataStreamer(1850): Abandoning BP-1901943026-172.17.0.2-1733231277769:blk_1073741843_1025
2024-12-03T13:08:26,710 WARN [Thread-726 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38443,DS-2cd4f562-9f9a-4adf-a1fa-e65d21cedc2f,DISK]
2024-12-03T13:08:26,712 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:47544 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741844_1026] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data10]'}, localName='127.0.0.1:35879', datanodeUuid='4d252050-9295-42d4-bd01-2e09556fe966', xmitsInProgress=0}:Exception transferring block BP-1901943026-172.17.0.2-1733231277769:blk_1073741844_1026 to mirror 127.0.0.1:41871
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T13:08:26,713 WARN [Thread-726 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1026
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41871
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:08:26,713 WARN [Thread-726 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1901943026-172.17.0.2-1733231277769:blk_1073741844_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35879,DS-db51cff4-b0ae-46ba-9514-33bac32e054f,DISK], DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK]) is bad.
2024-12-03T13:08:26,713 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:47544 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741844_1026] {}] datanode.BlockReceiver(316): Block 1073741844 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-12-03T13:08:26,713 WARN [Thread-726 {}] hdfs.DataStreamer(1850): Abandoning BP-1901943026-172.17.0.2-1733231277769:blk_1073741844_1026
2024-12-03T13:08:26,713 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:47544 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741844_1026] {}] datanode.DataXceiver(331): 127.0.0.1:35879:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47544 dst: /127.0.0.1:35879
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T13:08:26,714 WARN [Thread-726 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK]
2024-12-03T13:08:26,720 INFO [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231294622 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231306702
2024-12-03T13:08:26,720 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33439:33439),(127.0.0.1/127.0.0.1:43547:43547)]
2024-12-03T13:08:26,720 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 is not closed yet, will try archiving it next time
2024-12-03T13:08:26,720 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231294622 is not closed yet, will try archiving it next time
2024-12-03T13:08:26,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35879 is added to blk_1073741841_1023 (size=2431)
2024-12-03T13:08:27,124 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(751): hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 is not closed yet, will try archiving it next time
2024-12-03T13:08:28,447 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5daaf6e0[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35879, datanodeUuid=4d252050-9295-42d4-bd01-2e09556fe966, infoPort=43547, infoSecurePort=0, ipcPort=41219, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769):Failed to transfer BP-1901943026-172.17.0.2-1733231277769:blk_1073741841_1023 to 127.0.0.1:41871 got
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T13:08:29,484 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-03T13:08:30,135 WARN [master/35946657df09:0:becomeActiveMaster.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=96, requesting roll of WAL
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:08:30,135 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(197): WAL FSHLog 35946657df09%2C40851%2C1733231279554:(num 1733231279886) roll requested
2024-12-03T13:08:30,135 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C40851%2C1733231279554.1733231310135
2024-12-03T13:08:30,136 ERROR [ProcExecTimeout {}] region.RegionProcedureStore(422): Failed to delete pids=[4, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?]
    at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?]
    at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:08:30,136 ERROR [ProcExecTimeout {}] procedure2.TimeoutExecutorThread(124): Ignoring pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner exception: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL
java.io.UncheckedIOException: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL
    at org.apache.hadoop.hbase.procedure2.store.region.RegionProcedureStore.delete(RegionProcedureStore.java:423) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner.periodicExecute(CompletedProcedureCleaner.java:135) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.executeInMemoryChore(TimeoutExecutorThread.java:122) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.execDelayedProcedure(TimeoutExecutorThread.java:101) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.run(TimeoutExecutorThread.java:68) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
Caused by: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?]
    at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?]
    at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:08:30,139 WARN [Thread-738 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1028
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:08:30,139 WARN [Thread-738 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1901943026-172.17.0.2-1733231277769:blk_1073741846_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK], DatanodeInfoWithStorage[127.0.0.1:40745,DS-89a478a0-4dae-4c1e-a5d3-2ed429e7fdf2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK]) is bad.
2024-12-03T13:08:30,139 WARN [Thread-738 {}] hdfs.DataStreamer(1850): Abandoning BP-1901943026-172.17.0.2-1733231277769:blk_1073741846_1028
2024-12-03T13:08:30,140 WARN [Thread-738 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK]
2024-12-03T13:08:30,142 WARN [Thread-738 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1029
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:08:30,142 WARN [Thread-738 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1901943026-172.17.0.2-1733231277769:blk_1073741847_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40745,DS-89a478a0-4dae-4c1e-a5d3-2ed429e7fdf2,DISK], DatanodeInfoWithStorage[127.0.0.1:35879,DS-db51cff4-b0ae-46ba-9514-33bac32e054f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40745,DS-89a478a0-4dae-4c1e-a5d3-2ed429e7fdf2,DISK]) is bad.
2024-12-03T13:08:30,142 WARN [Thread-738 {}] hdfs.DataStreamer(1850): Abandoning BP-1901943026-172.17.0.2-1733231277769:blk_1073741847_1029 2024-12-03T13:08:30,143 WARN [Thread-738 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40745,DS-89a478a0-4dae-4c1e-a5d3-2ed429e7fdf2,DISK] 2024-12-03T13:08:30,160 WARN [master:store-WAL-Roller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL 2024-12-03T13:08:30,160 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 with entries=93, filesize=46.04 KB; new WAL /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231310135 2024-12-03T13:08:30,160 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33439:33439),(127.0.0.1/127.0.0.1:43547:43547)] 2024-12-03T13:08:30,160 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(751): hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 is not closed yet, will try archiving it next time 2024-12-03T13:08:30,160 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T13:08:30,161 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:08:30,161 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 2024-12-03T13:08:30,161 WARN [IPC Server handler 4 on default port 42201 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 has not been closed. Lease recovery is in progress. RecoveryId = 1031 for block blk_1073741830_1018 2024-12-03T13:08:30,162 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 after 1ms 2024-12-03T13:08:30,706 WARN [ResponseProcessor for block BP-1901943026-172.17.0.2-1733231277769:blk_1073741848_1030 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1901943026-172.17.0.2-1733231277769:blk_1073741848_1030 java.io.IOException: Bad response ERROR for BP-1901943026-172.17.0.2-1733231277769:blk_1073741848_1030 from datanode DatanodeInfoWithStorage[127.0.0.1:35879,DS-db51cff4-b0ae-46ba-9514-33bac32e054f,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T13:08:30,706 WARN [ResponseProcessor for block BP-1901943026-172.17.0.2-1733231277769:blk_1073741845_1027 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1901943026-172.17.0.2-1733231277769:blk_1073741845_1027 java.io.IOException: Bad response ERROR for BP-1901943026-172.17.0.2-1733231277769:blk_1073741845_1027 from datanode DatanodeInfoWithStorage[127.0.0.1:35879,DS-db51cff4-b0ae-46ba-9514-33bac32e054f,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T13:08:30,706 WARN [DataStreamer for file /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231310135 block BP-1901943026-172.17.0.2-1733231277769:blk_1073741848_1030 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1901943026-172.17.0.2-1733231277769:blk_1073741848_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41515,DS-790ef67b-1517-47df-a8ec-7b49ccbc92f8,DISK], DatanodeInfoWithStorage[127.0.0.1:35879,DS-db51cff4-b0ae-46ba-9514-33bac32e054f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35879,DS-db51cff4-b0ae-46ba-9514-33bac32e054f,DISK]) is bad. 2024-12-03T13:08:30,706 WARN [DataStreamer for file /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231306702 block BP-1901943026-172.17.0.2-1733231277769:blk_1073741845_1027 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1901943026-172.17.0.2-1733231277769:blk_1073741845_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41515,DS-790ef67b-1517-47df-a8ec-7b49ccbc92f8,DISK], DatanodeInfoWithStorage[127.0.0.1:35879,DS-db51cff4-b0ae-46ba-9514-33bac32e054f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35879,DS-db51cff4-b0ae-46ba-9514-33bac32e054f,DISK]) is bad. 
2024-12-03T13:08:30,707 WARN [PacketResponder: BP-1901943026-172.17.0.2-1733231277769:blk_1073741845_1027, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:35879] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:30,707 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:38856 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741845_1027] {}] datanode.DataXceiver(331): 127.0.0.1:41515:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38856 dst: /127.0.0.1:41515 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T13:08:30,707 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:47552 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741845_1027] {}] datanode.DataXceiver(331): 127.0.0.1:35879:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47552 dst: /127.0.0.1:35879 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:30,708 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1794f7a3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T13:08:30,708 WARN [PacketResponder: BP-1901943026-172.17.0.2-1733231277769:blk_1073741848_1030, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:35879] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] 
at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:30,708 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1841777198_22 at /127.0.0.1:51350 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741848_1030] {}] datanode.DataXceiver(331): 127.0.0.1:41515:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51350 dst: /127.0.0.1:41515 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:30,709 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3bcbf20d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T13:08:30,709 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T13:08:30,709 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5c620838{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T13:08:30,709 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@526b87a6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/hadoop.log.dir/,STOPPED} 2024-12-03T13:08:30,713 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1841777198_22 at /127.0.0.1:60738 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741848_1030] {}] datanode.DataXceiver(331): 127.0.0.1:35879:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60738 dst: /127.0.0.1:35879 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:30,723 WARN [BP-1901943026-172.17.0.2-1733231277769 heartbeating to localhost/127.0.0.1:42201 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T13:08:30,723 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-03T13:08:30,724 WARN [BP-1901943026-172.17.0.2-1733231277769 heartbeating to localhost/127.0.0.1:42201 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1901943026-172.17.0.2-1733231277769 (Datanode Uuid 4d252050-9295-42d4-bd01-2e09556fe966) service to localhost/127.0.0.1:42201 2024-12-03T13:08:30,724 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T13:08:30,731 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data9/current/BP-1901943026-172.17.0.2-1733231277769 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T13:08:30,732 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data10/current/BP-1901943026-172.17.0.2-1733231277769 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T13:08:30,732 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T13:08:30,735 WARN [sync.4 {}] wal.FSHLog(750): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41515,DS-790ef67b-1517-47df-a8ec-7b49ccbc92f8,DISK]] 2024-12-03T13:08:30,735 WARN [sync.4 {}] wal.FSHLog(721): Requesting log roll because of low replication, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41515,DS-790ef67b-1517-47df-a8ec-7b49ccbc92f8,DISK]] 2024-12-03T13:08:30,735 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 35946657df09%2C42823%2C1733231279724:(num 1733231306702) roll requested 2024-12-03T13:08:30,735 INFO [regionserver/35946657df09:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C42823%2C1733231279724.1733231310735 2024-12-03T13:08:30,741 WARN [Thread-750 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T13:08:30,741 WARN [Thread-750 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1901943026-172.17.0.2-1733231277769:blk_1073741849_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35879,DS-db51cff4-b0ae-46ba-9514-33bac32e054f,DISK], DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35879,DS-db51cff4-b0ae-46ba-9514-33bac32e054f,DISK]) is bad. 2024-12-03T13:08:30,741 WARN [Thread-750 {}] hdfs.DataStreamer(1850): Abandoning BP-1901943026-172.17.0.2-1733231277769:blk_1073741849_1034 2024-12-03T13:08:30,742 WARN [Thread-750 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35879,DS-db51cff4-b0ae-46ba-9514-33bac32e054f,DISK] 2024-12-03T13:08:30,745 WARN [Thread-750 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T13:08:30,745 WARN [Thread-750 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1901943026-172.17.0.2-1733231277769:blk_1073741850_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38443,DS-2cd4f562-9f9a-4adf-a1fa-e65d21cedc2f,DISK], DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38443,DS-2cd4f562-9f9a-4adf-a1fa-e65d21cedc2f,DISK]) is bad. 2024-12-03T13:08:30,745 WARN [Thread-750 {}] hdfs.DataStreamer(1850): Abandoning BP-1901943026-172.17.0.2-1733231277769:blk_1073741850_1035 2024-12-03T13:08:30,746 WARN [Thread-750 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38443,DS-2cd4f562-9f9a-4adf-a1fa-e65d21cedc2f,DISK] 2024-12-03T13:08:30,749 WARN [Thread-750 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T13:08:30,749 WARN [Thread-750 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1901943026-172.17.0.2-1733231277769:blk_1073741851_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK], DatanodeInfoWithStorage[127.0.0.1:40745,DS-89a478a0-4dae-4c1e-a5d3-2ed429e7fdf2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK]) is bad. 2024-12-03T13:08:30,749 WARN [Thread-750 {}] hdfs.DataStreamer(1850): Abandoning BP-1901943026-172.17.0.2-1733231277769:blk_1073741851_1036 2024-12-03T13:08:30,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42823 {}] regionserver.HRegion(8581): Flush requested on 1d703b8dd1a7ffa0786db6127714a68b 2024-12-03T13:08:30,749 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1d703b8dd1a7ffa0786db6127714a68b 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-03T13:08:30,750 WARN [Thread-750 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK] 2024-12-03T13:08:30,760 WARN [Thread-750 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1037 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40745 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T13:08:30,760 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:51388 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741852_1037] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data6]'}, localName='127.0.0.1:41515', datanodeUuid='991a1b7e-cb01-4d7a-bd53-a03c488bce89', xmitsInProgress=0}:Exception transferring block BP-1901943026-172.17.0.2-1733231277769:blk_1073741852_1037 to mirror 127.0.0.1:40745 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:30,760 WARN [Thread-750 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1901943026-172.17.0.2-1733231277769:blk_1073741852_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41515,DS-790ef67b-1517-47df-a8ec-7b49ccbc92f8,DISK], DatanodeInfoWithStorage[127.0.0.1:40745,DS-89a478a0-4dae-4c1e-a5d3-2ed429e7fdf2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40745,DS-89a478a0-4dae-4c1e-a5d3-2ed429e7fdf2,DISK]) is bad. 2024-12-03T13:08:30,760 WARN [Thread-750 {}] hdfs.DataStreamer(1850): Abandoning BP-1901943026-172.17.0.2-1733231277769:blk_1073741852_1037 2024-12-03T13:08:30,760 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:51388 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741852_1037] {}] datanode.BlockReceiver(316): Block 1073741852 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-03T13:08:30,760 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:51388 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741852_1037] {}] datanode.DataXceiver(331): 127.0.0.1:41515:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51388 dst: /127.0.0.1:41515 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T13:08:30,761 WARN [Thread-750 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40745,DS-89a478a0-4dae-4c1e-a5d3-2ed429e7fdf2,DISK] 2024-12-03T13:08:30,763 WARN [IPC Server handler 1 on default port 42201 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-03T13:08:30,763 WARN [IPC Server handler 1 on default port 42201 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-03T13:08:30,763 WARN [IPC Server handler 1 on default port 42201 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-03T13:08:30,781 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1d703b8dd1a7ffa0786db6127714a68b/.tmp/info/1ef6e035fd2e49f9ae2d2acbbc9ffb5c is 1080, key is row0002/info:/1733231306700/Put/seqid=0 2024-12-03T13:08:30,784 WARN [Thread-752 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T13:08:30,785 WARN [Thread-752 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1901943026-172.17.0.2-1733231277769:blk_1073741854_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40745,DS-89a478a0-4dae-4c1e-a5d3-2ed429e7fdf2,DISK], DatanodeInfoWithStorage[127.0.0.1:38443,DS-2cd4f562-9f9a-4adf-a1fa-e65d21cedc2f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40745,DS-89a478a0-4dae-4c1e-a5d3-2ed429e7fdf2,DISK]) is bad. 
2024-12-03T13:08:30,785 WARN [Thread-752 {}] hdfs.DataStreamer(1850): Abandoning BP-1901943026-172.17.0.2-1733231277769:blk_1073741854_1039 2024-12-03T13:08:30,788 WARN [Thread-752 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40745,DS-89a478a0-4dae-4c1e-a5d3-2ed429e7fdf2,DISK] 2024-12-03T13:08:30,789 INFO [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231306702 with entries=13, filesize=14.10 KB; new WAL /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231310735 2024-12-03T13:08:30,789 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33439:33439)] 2024-12-03T13:08:30,789 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 is not closed yet, will try archiving it next time 2024-12-03T13:08:30,789 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231306702 is not closed yet, will try archiving it next time 2024-12-03T13:08:30,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41515 is added to blk_1073741845_1033 (size=14443) 2024-12-03T13:08:30,795 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(751): hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 is not closed yet, will try archiving it next time 2024-12-03T13:08:30,804 WARN [Thread-752 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1040 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38443 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T13:08:30,804 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:51406 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741855_1040] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data6]'}, localName='127.0.0.1:41515', datanodeUuid='991a1b7e-cb01-4d7a-bd53-a03c488bce89', xmitsInProgress=0}:Exception transferring block BP-1901943026-172.17.0.2-1733231277769:blk_1073741855_1040 to mirror 127.0.0.1:38443 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:30,804 WARN [Thread-752 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1901943026-172.17.0.2-1733231277769:blk_1073741855_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41515,DS-790ef67b-1517-47df-a8ec-7b49ccbc92f8,DISK], DatanodeInfoWithStorage[127.0.0.1:38443,DS-2cd4f562-9f9a-4adf-a1fa-e65d21cedc2f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38443,DS-2cd4f562-9f9a-4adf-a1fa-e65d21cedc2f,DISK]) is bad. 2024-12-03T13:08:30,804 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:51406 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741855_1040] {}] datanode.BlockReceiver(316): Block 1073741855 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T13:08:30,804 WARN [Thread-752 {}] hdfs.DataStreamer(1850): Abandoning BP-1901943026-172.17.0.2-1733231277769:blk_1073741855_1040 2024-12-03T13:08:30,804 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:51406 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741855_1040] {}] datanode.DataXceiver(331): 127.0.0.1:41515:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51406 dst: /127.0.0.1:41515 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:30,810 WARN [Thread-752 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38443,DS-2cd4f562-9f9a-4adf-a1fa-e65d21cedc2f,DISK] 2024-12-03T13:08:30,820 WARN [Thread-752 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1041 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35879 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T13:08:30,820 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:51418 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741856_1041] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data6]'}, localName='127.0.0.1:41515', datanodeUuid='991a1b7e-cb01-4d7a-bd53-a03c488bce89', xmitsInProgress=0}:Exception transferring block BP-1901943026-172.17.0.2-1733231277769:blk_1073741856_1041 to mirror 127.0.0.1:35879 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T13:08:30,821 WARN [Thread-752 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1901943026-172.17.0.2-1733231277769:blk_1073741856_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41515,DS-790ef67b-1517-47df-a8ec-7b49ccbc92f8,DISK], DatanodeInfoWithStorage[127.0.0.1:35879,DS-db51cff4-b0ae-46ba-9514-33bac32e054f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35879,DS-db51cff4-b0ae-46ba-9514-33bac32e054f,DISK]) is bad. 2024-12-03T13:08:30,821 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:51418 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741856_1041] {}] datanode.BlockReceiver(316): Block 1073741856 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T13:08:30,821 WARN [Thread-752 {}] hdfs.DataStreamer(1850): Abandoning BP-1901943026-172.17.0.2-1733231277769:blk_1073741856_1041 2024-12-03T13:08:30,821 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:51418 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741856_1041] {}] datanode.DataXceiver(331): 127.0.0.1:41515:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51418 dst: /127.0.0.1:41515 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:30,827 WARN [Thread-752 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35879,DS-db51cff4-b0ae-46ba-9514-33bac32e054f,DISK] 2024-12-03T13:08:30,831 WARN [Thread-752 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T13:08:30,831 WARN [Thread-752 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1901943026-172.17.0.2-1733231277769:blk_1073741857_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK], DatanodeInfoWithStorage[127.0.0.1:41515,DS-790ef67b-1517-47df-a8ec-7b49ccbc92f8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK]) is bad. 2024-12-03T13:08:30,831 WARN [Thread-752 {}] hdfs.DataStreamer(1850): Abandoning BP-1901943026-172.17.0.2-1733231277769:blk_1073741857_1042 2024-12-03T13:08:30,833 WARN [Thread-752 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK] 2024-12-03T13:08:30,834 WARN [IPC Server handler 4 on default port 42201 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-03T13:08:30,834 WARN [IPC Server handler 4 on default port 42201 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-03T13:08:30,834 WARN [IPC Server handler 4 on default port 42201 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-03T13:08:30,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41515 is added to blk_1073741858_1043 (size=10347) 2024-12-03T13:08:30,973 WARN [sync.2 {}] wal.FSHLog(750): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41515,DS-790ef67b-1517-47df-a8ec-7b49ccbc92f8,DISK]] 2024-12-03T13:08:30,973 WARN [sync.2 {}] wal.FSHLog(721): Requesting log roll because of low replication, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41515,DS-790ef67b-1517-47df-a8ec-7b49ccbc92f8,DISK]] 2024-12-03T13:08:30,974 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 35946657df09%2C42823%2C1733231279724:(num 1733231310735) roll requested 2024-12-03T13:08:30,974 INFO [regionserver/35946657df09:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C42823%2C1733231279724.1733231310974 2024-12-03T13:08:30,978 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:51438 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741859_1044] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data6]'}, localName='127.0.0.1:41515', datanodeUuid='991a1b7e-cb01-4d7a-bd53-a03c488bce89', xmitsInProgress=0}:Exception transferring block BP-1901943026-172.17.0.2-1733231277769:blk_1073741859_1044 to mirror 127.0.0.1:38443 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:30,978 WARN [Thread-761 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1044 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38443 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T13:08:30,979 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:51438 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741859_1044] {}] datanode.BlockReceiver(316): Block 1073741859 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-03T13:08:30,979 WARN [Thread-761 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1901943026-172.17.0.2-1733231277769:blk_1073741859_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41515,DS-790ef67b-1517-47df-a8ec-7b49ccbc92f8,DISK], DatanodeInfoWithStorage[127.0.0.1:38443,DS-2cd4f562-9f9a-4adf-a1fa-e65d21cedc2f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38443,DS-2cd4f562-9f9a-4adf-a1fa-e65d21cedc2f,DISK]) is bad. 2024-12-03T13:08:30,979 WARN [Thread-761 {}] hdfs.DataStreamer(1850): Abandoning BP-1901943026-172.17.0.2-1733231277769:blk_1073741859_1044 2024-12-03T13:08:30,979 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:51438 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741859_1044] {}] datanode.DataXceiver(331): 127.0.0.1:41515:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51438 dst: /127.0.0.1:41515 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:30,979 WARN [Thread-761 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38443,DS-2cd4f562-9f9a-4adf-a1fa-e65d21cedc2f,DISK] 2024-12-03T13:08:30,984 WARN [Thread-761 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1045 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35879 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T13:08:30,984 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:51442 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741860_1045] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data6]'}, localName='127.0.0.1:41515', datanodeUuid='991a1b7e-cb01-4d7a-bd53-a03c488bce89', xmitsInProgress=0}:Exception transferring block BP-1901943026-172.17.0.2-1733231277769:blk_1073741860_1045 to mirror 127.0.0.1:35879 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:30,984 WARN [Thread-761 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1901943026-172.17.0.2-1733231277769:blk_1073741860_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41515,DS-790ef67b-1517-47df-a8ec-7b49ccbc92f8,DISK], DatanodeInfoWithStorage[127.0.0.1:35879,DS-db51cff4-b0ae-46ba-9514-33bac32e054f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35879,DS-db51cff4-b0ae-46ba-9514-33bac32e054f,DISK]) is bad. 2024-12-03T13:08:30,984 WARN [Thread-761 {}] hdfs.DataStreamer(1850): Abandoning BP-1901943026-172.17.0.2-1733231277769:blk_1073741860_1045 2024-12-03T13:08:30,984 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:51442 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741860_1045] {}] datanode.BlockReceiver(316): Block 1073741860 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-03T13:08:30,984 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:51442 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741860_1045] {}] datanode.DataXceiver(331): 127.0.0.1:41515:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51442 dst: /127.0.0.1:41515 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:30,985 WARN [Thread-761 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35879,DS-db51cff4-b0ae-46ba-9514-33bac32e054f,DISK] 2024-12-03T13:08:30,988 WARN [Thread-761 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1046 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41871 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T13:08:30,988 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:51450 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741861_1046] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data6]'}, localName='127.0.0.1:41515', datanodeUuid='991a1b7e-cb01-4d7a-bd53-a03c488bce89', xmitsInProgress=0}:Exception transferring block BP-1901943026-172.17.0.2-1733231277769:blk_1073741861_1046 to mirror 127.0.0.1:41871 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T13:08:30,988 WARN [Thread-761 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1901943026-172.17.0.2-1733231277769:blk_1073741861_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41515,DS-790ef67b-1517-47df-a8ec-7b49ccbc92f8,DISK], DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK]) is bad. 2024-12-03T13:08:30,988 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:51450 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741861_1046] {}] datanode.BlockReceiver(316): Block 1073741861 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-03T13:08:30,988 WARN [Thread-761 {}] hdfs.DataStreamer(1850): Abandoning BP-1901943026-172.17.0.2-1733231277769:blk_1073741861_1046 2024-12-03T13:08:30,988 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:51450 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741861_1046] {}] datanode.DataXceiver(331): 127.0.0.1:41515:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51450 dst: /127.0.0.1:41515 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:30,989 WARN [Thread-761 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK] 2024-12-03T13:08:30,991 WARN [Thread-761 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1047 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40745 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
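Thread-761's records above repeat one recovery pattern four times in a row: the client gets a pipeline from the namenode, the first datanode cannot reach its mirror ("Connection refused", surfaced to the client as firstBadLink), the block is abandoned, the unreachable node goes onto an exclusion list, and allocation is retried. A generic version of that abandon-and-exclude loop, using a stand-in allocator rather than the HDFS client API:

    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;
    import java.util.function.Function;
    import java.util.function.Predicate;

    // Sketch of the abandon-and-exclude retry visible in the DataStreamer
    // warnings: on failure, exclude the bad node and request a new pipeline.
    public class ExcludeAndRetry {
        /** Returns the first node that accepts the block, or null when allocation is exhausted. */
        static String allocateWithExclusion(Function<Set<String>, String> allocator,
                                            Predicate<String> connect,
                                            int maxAttempts) {
            Set<String> excluded = new HashSet<>();
            for (int attempt = 0; attempt < maxAttempts; attempt++) {
                String node = allocator.apply(excluded);
                if (node == null) return null;       // no candidates left ("Failed to place enough replicas")
                if (connect.test(node)) return node; // pipeline established
                excluded.add(node);                  // the "Excluding datanode ..." step
            }
            return null;
        }

        public static void main(String[] args) {
            List<String> nodes = List.of("127.0.0.1:38443", "127.0.0.1:35879", "127.0.0.1:41515");
            String ok = allocateWithExclusion(
                excl -> nodes.stream().filter(n -> !excl.contains(n)).findFirst().orElse(null),
                node -> node.endsWith(":41515"),     // pretend only one node is reachable
                5);
            System.out.println("pipeline node: " + ok);
        }
    }

Once every candidate is excluded, the namenode has nothing left to hand out, which is exactly the state the BlockPlacementPolicyDefault warnings below describe.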
2024-12-03T13:08:30,991 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:51456 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741862_1047] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data6]'}, localName='127.0.0.1:41515', datanodeUuid='991a1b7e-cb01-4d7a-bd53-a03c488bce89', xmitsInProgress=0}:Exception transferring block BP-1901943026-172.17.0.2-1733231277769:blk_1073741862_1047 to mirror 127.0.0.1:40745 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:30,992 WARN [Thread-761 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1901943026-172.17.0.2-1733231277769:blk_1073741862_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41515,DS-790ef67b-1517-47df-a8ec-7b49ccbc92f8,DISK], DatanodeInfoWithStorage[127.0.0.1:40745,DS-89a478a0-4dae-4c1e-a5d3-2ed429e7fdf2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40745,DS-89a478a0-4dae-4c1e-a5d3-2ed429e7fdf2,DISK]) is bad. 2024-12-03T13:08:30,992 WARN [Thread-761 {}] hdfs.DataStreamer(1850): Abandoning BP-1901943026-172.17.0.2-1733231277769:blk_1073741862_1047 2024-12-03T13:08:30,992 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:51456 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741862_1047] {}] datanode.BlockReceiver(316): Block 1073741862 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-03T13:08:30,992 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:51456 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741862_1047] {}] datanode.DataXceiver(331): 127.0.0.1:41515:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51456 dst: /127.0.0.1:41515 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:30,993 WARN [Thread-761 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40745,DS-89a478a0-4dae-4c1e-a5d3-2ed429e7fdf2,DISK] 2024-12-03T13:08:30,993 WARN [IPC Server handler 1 on default port 42201 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-03T13:08:30,993 WARN [IPC Server handler 1 on default port 42201 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-03T13:08:30,994 WARN [IPC Server handler 1 on default port 42201 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-03T13:08:31,004 INFO [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231310735 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231310974 2024-12-03T13:08:31,005 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33439:33439)] 2024-12-03T13:08:31,005 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 is not closed yet, will try archiving it next time 2024-12-03T13:08:31,005 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231310735 is not closed yet, will try archiving it next time 2024-12-03T13:08:31,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:41515 is added to blk_1073741853_1038 (size=1261) 2024-12-03T13:08:31,176 WARN [sync.4 {}] wal.FSHLog(760): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-12-03T13:08:31,250 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1d703b8dd1a7ffa0786db6127714a68b/.tmp/info/1ef6e035fd2e49f9ae2d2acbbc9ffb5c 2024-12-03T13:08:31,258 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1d703b8dd1a7ffa0786db6127714a68b/.tmp/info/1ef6e035fd2e49f9ae2d2acbbc9ffb5c as hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1d703b8dd1a7ffa0786db6127714a68b/info/1ef6e035fd2e49f9ae2d2acbbc9ffb5c 2024-12-03T13:08:31,265 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1d703b8dd1a7ffa0786db6127714a68b/info/1ef6e035fd2e49f9ae2d2acbbc9ffb5c, entries=5, sequenceid=12, filesize=10.1 K 2024-12-03T13:08:31,266 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 1d703b8dd1a7ffa0786db6127714a68b in 517ms, sequenceid=12, compaction requested=false 2024-12-03T13:08:31,266 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1d703b8dd1a7ffa0786db6127714a68b: 2024-12-03T13:08:31,395 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T13:08:31,403 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T13:08:31,408 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(751): hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 is not closed yet, will try archiving it next time 2024-12-03T13:08:31,419 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231294622 to hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/oldWALs/35946657df09%2C42823%2C1733231279724.1733231294622 2024-12-03T13:08:31,420 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T13:08:31,420 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T13:08:31,420 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T13:08:31,422 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18fac2c5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/hadoop.log.dir/,AVAILABLE} 2024-12-03T13:08:31,422 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@638be665{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T13:08:31,537 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@487dde88{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/java.io.tmpdir/jetty-localhost-41403-hadoop-hdfs-3_4_1-tests_jar-_-any-5042270322916145288/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T13:08:31,538 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3853f51f{HTTP/1.1, (http/1.1)}{localhost:41403} 2024-12-03T13:08:31,538 INFO [Time-limited test {}] server.Server(415): Started @157303ms 2024-12-03T13:08:31,539 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T13:08:31,871 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1293a76f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41515, datanodeUuid=991a1b7e-cb01-4d7a-bd53-a03c488bce89, infoPort=33439, infoSecurePort=0, ipcPort=39031, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769):Failed to transfer BP-1901943026-172.17.0.2-1733231277769:blk_1073741845_1033 to 127.0.0.1:38443 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:31,871 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1c545b4d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41515, datanodeUuid=991a1b7e-cb01-4d7a-bd53-a03c488bce89, infoPort=33439, infoSecurePort=0, ipcPort=39031, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769):Failed to transfer BP-1901943026-172.17.0.2-1733231277769:blk_1073741858_1043 to 127.0.0.1:41871 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:31,884 WARN [Thread-784 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T13:08:31,887 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x33d03ff8c04f5f70 with lease ID 0x147fe8b21f88577f: from storage DS-2cd4f562-9f9a-4adf-a1fa-e65d21cedc2f node DatanodeRegistration(127.0.0.1:37071, datanodeUuid=bb5ffb4e-0b04-4306-9aa6-d2b870e4faf7, infoPort=34309, infoSecurePort=0, ipcPort=35063, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-03T13:08:31,888 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x33d03ff8c04f5f70 with lease ID 0x147fe8b21f88577f: from storage DS-13f8643f-ff8f-4072-94dd-a3c1e350d374 node DatanodeRegistration(127.0.0.1:37071, datanodeUuid=bb5ffb4e-0b04-4306-9aa6-d2b870e4faf7, infoPort=34309, infoSecurePort=0, ipcPort=35063, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769), blocks: 8, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T13:08:32,206 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T13:08:32,207 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57066, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T13:08:32,869 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1c545b4d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41515, datanodeUuid=991a1b7e-cb01-4d7a-bd53-a03c488bce89, infoPort=33439, infoSecurePort=0, ipcPort=39031, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769):Failed to transfer BP-1901943026-172.17.0.2-1733231277769:blk_1073741853_1038 to 127.0.0.1:41871 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
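Every stack trace in this stretch bottoms out in a plain TCP connect to a datanode port that is no longer listening; that is what java.net.ConnectException: Connection refused means here, since the peer host is reachable but nothing is bound to the port. A small standalone probe reproduces the same symptom (host and port are placeholders taken from the log):

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.net.Socket;

    // Probe a host:port the way the failing transfers do: a raw connect with
    // a timeout. "Connection refused" surfaces as an IOException here.
    public class PortProbe {
        static boolean isListening(String host, int port, int timeoutMs) {
            try (Socket s = new Socket()) {
                s.connect(new InetSocketAddress(host, port), timeoutMs);
                return true;
            } catch (IOException e) {   // ConnectException("Connection refused") lands here
                return false;
            }
        }

        public static void main(String[] args) {
            System.out.println(isListening("127.0.0.1", 38443, 1000));
        }
    }

Run against 127.0.0.1:38443 while that datanode is down and isListening returns false for exactly the reason the DataTransfer threads keep logging.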
2024-12-03T13:08:34,163 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 after 4002ms 2024-12-03T13:08:36,786 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T13:08:36,788 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57080, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T13:08:40,889 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2bfdd83b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37071, datanodeUuid=bb5ffb4e-0b04-4306-9aa6-d2b870e4faf7, infoPort=34309, infoSecurePort=0, ipcPort=35063, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769):Failed to transfer BP-1901943026-172.17.0.2-1733231277769:blk_1073741836_1012 to 127.0.0.1:35879 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:40,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41515 is added to blk_1073741838_1014 (size=76) 2024-12-03T13:08:41,890 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@253a22ec[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37071, datanodeUuid=bb5ffb4e-0b04-4306-9aa6-d2b870e4faf7, infoPort=34309, infoSecurePort=0, ipcPort=35063, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769):Failed to transfer BP-1901943026-172.17.0.2-1733231277769:blk_1073741828_1004 to 127.0.0.1:40745 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
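The Close-WAL-Writer record above ("attempt=1 ... after 4002ms") comes from a bounded retry loop that re-attempts lease recovery and reports elapsed time per attempt. A generic skeleton in the same spirit, with a placeholder recover() hook standing in for the real filesystem call:

    import java.util.function.BooleanSupplier;

    // Generic bounded retry with per-attempt elapsed-time reporting, shaped
    // like the RecoverLeaseFSUtils log line. The recover() hook is a stub.
    public class RetryWithElapsed {
        static boolean retry(BooleanSupplier recover, int maxAttempts, long pauseMs)
                throws InterruptedException {
            long start = System.nanoTime();
            for (int attempt = 1; attempt <= maxAttempts; attempt++) {
                if (recover.getAsBoolean()) {
                    return true;
                }
                long elapsedMs = (System.nanoTime() - start) / 1_000_000;
                System.out.printf("Failed to recover lease, attempt=%d after %dms%n",
                    attempt, elapsedMs);
                Thread.sleep(pauseMs);
            }
            return false;
        }

        public static void main(String[] args) throws InterruptedException {
            retry(() -> false, 3, 100);   // always fails, just to show the log shape
        }
    }

The attempt counter and elapsed time in the message are what make these log lines diagnosable later; the pause length and attempt cap here are made-up values, not HBase's configuration.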
2024-12-03T13:08:41,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41515 is added to blk_1073741826_1002 (size=42) 2024-12-03T13:08:43,578 INFO [master/35946657df09:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-03T13:08:43,578 INFO [master/35946657df09:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-03T13:08:43,889 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2bfdd83b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37071, datanodeUuid=bb5ffb4e-0b04-4306-9aa6-d2b870e4faf7, infoPort=34309, infoSecurePort=0, ipcPort=35063, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769):Failed to transfer BP-1901943026-172.17.0.2-1733231277769:blk_1073741829_1005 to 127.0.0.1:40745 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:43,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41515 is added to blk_1073741827_1003 (size=196) 2024-12-03T13:08:44,890 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@253a22ec[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37071, datanodeUuid=bb5ffb4e-0b04-4306-9aa6-d2b870e4faf7, infoPort=34309, infoSecurePort=0, ipcPort=35063, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769):Failed to transfer BP-1901943026-172.17.0.2-1733231277769:blk_1073741825_1001 to 127.0.0.1:40745 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T13:08:46,890 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@253a22ec[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37071, datanodeUuid=bb5ffb4e-0b04-4306-9aa6-d2b870e4faf7, infoPort=34309, infoSecurePort=0, ipcPort=35063, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769):Failed to transfer BP-1901943026-172.17.0.2-1733231277769:blk_1073741837_1013 to 127.0.0.1:40745 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:46,890 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2bfdd83b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37071, datanodeUuid=bb5ffb4e-0b04-4306-9aa6-d2b870e4faf7, infoPort=34309, infoSecurePort=0, ipcPort=35063, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769):Failed to transfer BP-1901943026-172.17.0.2-1733231277769:blk_1073741835_1011 to 127.0.0.1:40745 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:47,890 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@253a22ec[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37071, datanodeUuid=bb5ffb4e-0b04-4306-9aa6-d2b870e4faf7, infoPort=34309, infoSecurePort=0, ipcPort=35063, storageInfo=lv=-57;cid=testClusterID;nsid=1269992494;c=1733231277769):Failed to transfer BP-1901943026-172.17.0.2-1733231277769:blk_1073741831_1007 to 127.0.0.1:35879 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:47,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41515 is added to blk_1073741833_1009 (size=32) 2024-12-03T13:08:48,347 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1d703b8dd1a7ffa0786db6127714a68b, had cached 0 bytes from a total of 10347 2024-12-03T13:08:50,449 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C42823%2C1733231279724.1733231330449 2024-12-03T13:08:50,454 WARN [Thread-812 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1049 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35879 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T13:08:50,454 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1841777198_22 at /127.0.0.1:47148 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741864_1049] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data6]'}, localName='127.0.0.1:41515', datanodeUuid='991a1b7e-cb01-4d7a-bd53-a03c488bce89', xmitsInProgress=0}:Exception transferring block BP-1901943026-172.17.0.2-1733231277769:blk_1073741864_1049 to mirror 127.0.0.1:35879 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:08:50,454 WARN [Thread-812 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1901943026-172.17.0.2-1733231277769:blk_1073741864_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41515,DS-790ef67b-1517-47df-a8ec-7b49ccbc92f8,DISK], DatanodeInfoWithStorage[127.0.0.1:35879,DS-db51cff4-b0ae-46ba-9514-33bac32e054f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35879,DS-db51cff4-b0ae-46ba-9514-33bac32e054f,DISK]) is bad. 2024-12-03T13:08:50,454 WARN [Thread-812 {}] hdfs.DataStreamer(1850): Abandoning BP-1901943026-172.17.0.2-1733231277769:blk_1073741864_1049 2024-12-03T13:08:50,454 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1841777198_22 at /127.0.0.1:47148 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741864_1049] {}] datanode.BlockReceiver(316): Block 1073741864 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-03T13:08:50,454 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1841777198_22 at /127.0.0.1:47148 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741864_1049] {}] datanode.DataXceiver(331): 127.0.0.1:41515:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47148 dst: /127.0.0.1:41515 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T13:08:50,456 WARN [Thread-812 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35879,DS-db51cff4-b0ae-46ba-9514-33bac32e054f,DISK] 2024-12-03T13:08:50,498 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231310974 with entries=2, filesize=1.57 KB; new WAL /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231330449 2024-12-03T13:08:50,498 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33439:33439),(127.0.0.1/127.0.0.1:34309:34309)] 2024-12-03T13:08:50,498 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 is not closed yet, will try archiving it next time 2024-12-03T13:08:50,499 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231310974 is not closed yet, will try archiving it next time 2024-12-03T13:08:50,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41515 is added to blk_1073741863_1048 (size=1618) 2024-12-03T13:08:50,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42823 {}] regionserver.HRegion(8581): Flush requested on 1d703b8dd1a7ffa0786db6127714a68b 2024-12-03T13:08:50,512 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(751): hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 is not closed yet, will try archiving it next time 2024-12-03T13:08:50,513 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1d703b8dd1a7ffa0786db6127714a68b 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-12-03T13:08:50,527 INFO [sync.4 {}] wal.FSHLog(777): LowReplication-Roller was enabled. 
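The flush records in this stretch report the memstore's payload and heap footprint separately (dataSize=10.50 KB versus heapSize=11.50 KB) because each cell carries bookkeeping overhead on top of its raw bytes. A toy accounting model of that split, with a made-up per-cell overhead and flush limit:

    import java.util.ArrayList;
    import java.util.List;

    // Toy memstore accounting: track data bytes and heap overhead separately
    // and flush when heap usage crosses a limit. Overhead and limit are
    // invented for the demo, not HBase's real constants.
    public class MemstoreAccounting {
        private static final long FLUSH_LIMIT_HEAP = 128 * 1024;
        private static final long PER_CELL_OVERHEAD = 64;

        private final List<byte[]> cells = new ArrayList<>();
        private long dataSize, heapSize;

        void add(byte[] cell) {
            cells.add(cell);
            dataSize += cell.length;
            heapSize += cell.length + PER_CELL_OVERHEAD;
            if (heapSize >= FLUSH_LIMIT_HEAP) {
                System.out.printf("Flushing: dataSize=%d heapSize=%d cells=%d%n",
                    dataSize, heapSize, cells.size());
                cells.clear();
                dataSize = heapSize = 0;
            }
        }

        public static void main(String[] args) {
            MemstoreAccounting m = new MemstoreAccounting();
            for (int i = 0; i < 2000; i++) m.add(new byte[100]);
        }
    }

The gap between the two numbers is why flush decisions key off heap size rather than data size: the overhead is real memory pressure even though it never reaches the HFile.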
2024-12-03T13:08:50,541 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1d703b8dd1a7ffa0786db6127714a68b/.tmp/info/dbec2d29324648c9a7804881a45efa96 is 1080, key is row0007/info:/1733231310752/Put/seqid=0 2024-12-03T13:08:50,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41515 is added to blk_1073741866_1051 (size=13583) 2024-12-03T13:08:50,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37071 is added to blk_1073741866_1051 (size=13583) 2024-12-03T13:08:50,581 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-03T13:08:50,582 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-03T13:08:50,582 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x226eecc5 to 127.0.0.1:53253 2024-12-03T13:08:50,582 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T13:08:50,582 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-03T13:08:50,582 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=194310653, stopped=false 2024-12-03T13:08:50,582 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=35946657df09,40851,1733231279554 2024-12-03T13:08:50,641 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45953-0x1019c9820000003, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T13:08:50,641 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T13:08:50,641 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-03T13:08:50,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T13:08:50,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45953-0x1019c9820000003, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T13:08:50,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42823-0x1019c9820000001, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T13:08:50,642 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T13:08:50,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42823-0x1019c9820000001, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T13:08:50,642 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '35946657df09,42823,1733231279724' ***** 2024-12-03T13:08:50,642 INFO [Time-limited test {}] 
regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-03T13:08:50,642 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '35946657df09,45953,1733231282710' ***** 2024-12-03T13:08:50,642 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-03T13:08:50,642 INFO [RS:0;35946657df09:42823 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T13:08:50,642 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45953-0x1019c9820000003, quorum=127.0.0.1:53253, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T13:08:50,642 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T13:08:50,643 INFO [RS:1;35946657df09:45953 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T13:08:50,643 INFO [RS:1;35946657df09:45953 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T13:08:50,643 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-03T13:08:50,643 INFO [RS:1;35946657df09:45953 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T13:08:50,643 INFO [RS:1;35946657df09:45953 {}] regionserver.HRegionServer(1224): stopping server 35946657df09,45953,1733231282710 2024-12-03T13:08:50,643 DEBUG [RS:1;35946657df09:45953 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T13:08:50,643 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42823-0x1019c9820000001, quorum=127.0.0.1:53253, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T13:08:50,643 INFO [RS:1;35946657df09:45953 {}] regionserver.HRegionServer(1250): stopping server 35946657df09,45953,1733231282710; all regions closed. 2024-12-03T13:08:50,644 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,45953,1733231282710 2024-12-03T13:08:50,644 WARN [WAL-Shutdown-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T13:08:50,645 ERROR [RS:1;35946657df09:45953 {}] regionserver.HRegionServer(1664): Shutdown / close of WAL failed: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK]] are bad. Aborting... 
2024-12-03T13:08:50,645 DEBUG [RS:1;35946657df09:45953 {}] regionserver.HRegionServer(1665): Shutdown / close exception details:
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:08:50,645 DEBUG [RS:1;35946657df09:45953 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-03T13:08:50,645 INFO [RS:1;35946657df09:45953 {}] regionserver.LeaseManager(133): Closed leases
2024-12-03T13:08:50,645 INFO [RS:1;35946657df09:45953 {}] hbase.ChoreService(370): Chore service for: regionserver/35946657df09:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown
2024-12-03T13:08:50,646 INFO [regionserver/35946657df09:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-03T13:08:50,646 INFO [RS:1;35946657df09:45953 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-03T13:08:50,646 INFO [RS:1;35946657df09:45953 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-03T13:08:50,646 INFO [RS:1;35946657df09:45953 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-03T13:08:50,646 INFO [RS:1;35946657df09:45953 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:45953
2024-12-03T13:08:50,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45953-0x1019c9820000003, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/35946657df09,45953,1733231282710
2024-12-03T13:08:50,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-03T13:08:50,660 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [35946657df09,45953,1733231282710]
2024-12-03T13:08:50,660 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 35946657df09,45953,1733231282710; numProcessing=1
2024-12-03T13:08:50,668 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/35946657df09,45953,1733231282710 already deleted, retry=false
2024-12-03T13:08:50,668 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 35946657df09,45953,1733231282710 expired; onlineServers=1
2024-12-03T13:08:50,760 INFO [RS:1;35946657df09:45953 {}] regionserver.HRegionServer(1307): Exiting; stopping=35946657df09,45953,1733231282710; zookeeper connection closed.
2024-12-03T13:08:50,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45953-0x1019c9820000003, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T13:08:50,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45953-0x1019c9820000003, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T13:08:50,760 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@540c785e {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@540c785e
2024-12-03T13:08:50,972 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=25 (bloomFilter=true), to=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1d703b8dd1a7ffa0786db6127714a68b/.tmp/info/dbec2d29324648c9a7804881a45efa96
2024-12-03T13:08:50,982 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1d703b8dd1a7ffa0786db6127714a68b/.tmp/info/dbec2d29324648c9a7804881a45efa96 as hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1d703b8dd1a7ffa0786db6127714a68b/info/dbec2d29324648c9a7804881a45efa96
2024-12-03T13:08:50,996 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1d703b8dd1a7ffa0786db6127714a68b/info/dbec2d29324648c9a7804881a45efa96, entries=8, sequenceid=25, filesize=13.3 K
2024-12-03T13:08:51,000 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~10.50 KB/10757, heapSize ~11.48 KB/11760, currentSize=9.46 KB/9684 for 1d703b8dd1a7ffa0786db6127714a68b in 488ms, sequenceid=25, compaction requested=false
2024-12-03T13:08:51,000 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1d703b8dd1a7ffa0786db6127714a68b:
2024-12-03T13:08:51,000 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=23.4 K, sizeToCheck=16.0 K
2024-12-03T13:08:51,000 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-03T13:08:51,001 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1d703b8dd1a7ffa0786db6127714a68b/info/dbec2d29324648c9a7804881a45efa96 because midkey is the same as first or last row
2024-12-03T13:08:51,001 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting
2024-12-03T13:08:51,001 INFO [RS:0;35946657df09:42823 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-03T13:08:51,001 INFO [RS:0;35946657df09:42823 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-03T13:08:51,001 INFO [RS:0;35946657df09:42823 {}] regionserver.HRegionServer(3579): Received CLOSE for 1d703b8dd1a7ffa0786db6127714a68b
2024-12-03T13:08:51,002 INFO [RS:0;35946657df09:42823 {}] regionserver.HRegionServer(3579): Received CLOSE for 641f7de0aec6dadb323be03e384e3016
2024-12-03T13:08:51,002 INFO [RS:0;35946657df09:42823 {}] regionserver.HRegionServer(1224): stopping server 35946657df09,42823,1733231279724
2024-12-03T13:08:51,003 DEBUG [RS:0;35946657df09:42823 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-03T13:08:51,003 INFO [RS:0;35946657df09:42823 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-03T13:08:51,003 INFO [RS:0;35946657df09:42823 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-03T13:08:51,003 INFO [RS:0;35946657df09:42823 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-03T13:08:51,003 INFO [RS:0;35946657df09:42823 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740
2024-12-03T13:08:51,003 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 1d703b8dd1a7ffa0786db6127714a68b, disabling compactions & flushes
2024-12-03T13:08:51,003 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733231282899.1d703b8dd1a7ffa0786db6127714a68b.
2024-12-03T13:08:51,003 INFO [RS:0;35946657df09:42823 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close
2024-12-03T13:08:51,003 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733231282899.1d703b8dd1a7ffa0786db6127714a68b.
2024-12-03T13:08:51,003 DEBUG [RS:0;35946657df09:42823 {}] regionserver.HRegionServer(1603): Online Regions={1d703b8dd1a7ffa0786db6127714a68b=TestLogRolling-testLogRollOnDatanodeDeath,,1733231282899.1d703b8dd1a7ffa0786db6127714a68b., 1588230740=hbase:meta,,1.1588230740, 641f7de0aec6dadb323be03e384e3016=hbase:namespace,,1733231281265.641f7de0aec6dadb323be03e384e3016.}
2024-12-03T13:08:51,003 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733231282899.1d703b8dd1a7ffa0786db6127714a68b. after waiting 0 ms
2024-12-03T13:08:51,004 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733231282899.1d703b8dd1a7ffa0786db6127714a68b.
2024-12-03T13:08:51,004 DEBUG [RS:0;35946657df09:42823 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 1d703b8dd1a7ffa0786db6127714a68b, 641f7de0aec6dadb323be03e384e3016
2024-12-03T13:08:51,004 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 1d703b8dd1a7ffa0786db6127714a68b 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB
2024-12-03T13:08:51,004 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-03T13:08:51,004 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-03T13:08:51,004 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-03T13:08:51,004 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-03T13:08:51,004 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-03T13:08:51,004 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.87 KB heapSize=5.40 KB
2024-12-03T13:08:51,005 WARN [RS_OPEN_META-regionserver/35946657df09:0-0.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=15, requesting roll of WAL
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:08:51,005 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-03T13:08:51,006 ERROR [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2808): ***** ABORTING region server 35946657df09,42823,1733231279724: Unrecoverable exception while closing hbase:meta,,1.1588230740 *****
org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?]
    at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?]
    at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:08:51,006 ERROR [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2815): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint]
2024-12-03T13:08:51,006 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 35946657df09%2C42823%2C1733231279724.meta:.meta(num 1733231280958) roll requested
2024-12-03T13:08:51,007 INFO [regionserver/35946657df09:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C42823%2C1733231279724.meta.1733231331006.meta
2024-12-03T13:08:51,009 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for java.lang:type=Memory
2024-12-03T13:08:51,013 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC
2024-12-03T13:08:51,013 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication
2024-12-03T13:08:51,013 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server
2024-12-03T13:08:51,013 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2819): Dump of metrics as JSON on abort: { "beans": [ { "name": "java.lang:type=Memory", "modelerType": "sun.management.MemoryImpl", "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1048576000, "init": 1048576000, "max": 2306867200, "used": 258764600 }, "NonHeapMemoryUsage": { "committed": 161284096, "init": 7667712, "max": -1, "used": 159635544 }, "Verbose": false, "ObjectName": "java.lang:type=Memory" } ], "beans": [], "beans": [], "beans": [] }
2024-12-03T13:08:51,015 WARN [Thread-827 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1052
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:08:51,016 WARN [Thread-827 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1901943026-172.17.0.2-1733231277769:blk_1073741867_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35879,DS-db51cff4-b0ae-46ba-9514-33bac32e054f,DISK], DatanodeInfoWithStorage[127.0.0.1:41515,DS-790ef67b-1517-47df-a8ec-7b49ccbc92f8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35879,DS-db51cff4-b0ae-46ba-9514-33bac32e054f,DISK]) is bad.
2024-12-03T13:08:51,016 WARN [Thread-827 {}] hdfs.DataStreamer(1850): Abandoning BP-1901943026-172.17.0.2-1733231277769:blk_1073741867_1052
2024-12-03T13:08:51,017 WARN [Thread-827 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35879,DS-db51cff4-b0ae-46ba-9514-33bac32e054f,DISK]
2024-12-03T13:08:51,019 WARN [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40851 {}] master.MasterRpcServices(626): 35946657df09,42823,1733231279724 reported a fatal error:
***** ABORTING region server 35946657df09,42823,1733231279724: Unrecoverable exception while closing hbase:meta,,1.1588230740 *****
Cause: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191)
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064)
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967)
    at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168)
    at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125)
    at java.base/java.lang.Thread.run(Thread.java:840)
Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721)
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644)
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627)
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408)
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707)
2024-12-03T13:08:51,035 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1d703b8dd1a7ffa0786db6127714a68b/.tmp/info/dce79ac5cd9f47afbf589aa8fe82a088 is 1080, key is row0014/info:/1733231330516/Put/seqid=0
2024-12-03T13:08:51,060 WARN [Thread-830 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1054
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35879
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:08:51,060 WARN [Thread-830 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1901943026-172.17.0.2-1733231277769:blk_1073741869_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37071,DS-2cd4f562-9f9a-4adf-a1fa-e65d21cedc2f,DISK], DatanodeInfoWithStorage[127.0.0.1:35879,DS-db51cff4-b0ae-46ba-9514-33bac32e054f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35879,DS-db51cff4-b0ae-46ba-9514-33bac32e054f,DISK]) is bad.
2024-12-03T13:08:51,060 WARN [Thread-830 {}] hdfs.DataStreamer(1850): Abandoning BP-1901943026-172.17.0.2-1733231277769:blk_1073741869_1054
2024-12-03T13:08:51,060 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:52596 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741869_1054] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data4]'}, localName='127.0.0.1:37071', datanodeUuid='bb5ffb4e-0b04-4306-9aa6-d2b870e4faf7', xmitsInProgress=0}:Exception transferring block BP-1901943026-172.17.0.2-1733231277769:blk_1073741869_1054 to mirror 127.0.0.1:35879
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T13:08:51,061 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:52596 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741869_1054] {}] datanode.BlockReceiver(316): Block 1073741869 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-12-03T13:08:51,061 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1866369024_22 at /127.0.0.1:52596 [Receiving block BP-1901943026-172.17.0.2-1733231277769:blk_1073741869_1054] {}] datanode.DataXceiver(331): 127.0.0.1:37071:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52596 dst: /127.0.0.1:37071
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T13:08:51,064 WARN [Thread-830 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35879,DS-db51cff4-b0ae-46ba-9514-33bac32e054f,DISK]
2024-12-03T13:08:51,072 WARN [regionserver/35946657df09:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WAL
org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL
2024-12-03T13:08:51,072 INFO [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta with entries=11, filesize=3.63 KB; new WAL /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231331006.meta
2024-12-03T13:08:51,081 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34309:34309),(127.0.0.1/127.0.0.1:33439:33439)]
2024-12-03T13:08:51,081 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta is not closed yet, will try archiving it next time
2024-12-03T13:08:51,081 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:08:51,081 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41871,DS-331304b8-ae9c-4bbd-903a-0b69df524b18,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:08:51,081 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
2024-12-03T13:08:51,083 WARN [IPC Server handler 4 on default port 42201 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta has not been closed. Lease recovery is in progress. RecoveryId = 1056 for block blk_1073741834_1016
2024-12-03T13:08:51,083 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta after 2ms
2024-12-03T13:08:51,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37071 is added to blk_1073741870_1055 (size=14663)
2024-12-03T13:08:51,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41515 is added to blk_1073741870_1055 (size=14663)
2024-12-03T13:08:51,100 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1d703b8dd1a7ffa0786db6127714a68b/.tmp/info/dce79ac5cd9f47afbf589aa8fe82a088
2024-12-03T13:08:51,121 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1d703b8dd1a7ffa0786db6127714a68b/.tmp/info/dce79ac5cd9f47afbf589aa8fe82a088 as hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1d703b8dd1a7ffa0786db6127714a68b/info/dce79ac5cd9f47afbf589aa8fe82a088
2024-12-03T13:08:51,145 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1d703b8dd1a7ffa0786db6127714a68b/info/dce79ac5cd9f47afbf589aa8fe82a088, entries=9, sequenceid=37, filesize=14.3 K
2024-12-03T13:08:51,157 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 1d703b8dd1a7ffa0786db6127714a68b in 152ms, sequenceid=37, compaction requested=true
2024-12-03T13:08:51,205 INFO [RS:0;35946657df09:42823 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740
2024-12-03T13:08:51,205 DEBUG [RS:0;35946657df09:42823 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 1d703b8dd1a7ffa0786db6127714a68b, 641f7de0aec6dadb323be03e384e3016
2024-12-03T13:08:51,205 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-03T13:08:51,205 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-03T13:08:51,205 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-03T13:08:51,206 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-03T13:08:51,206 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-03T13:08:51,206 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-03T13:08:51,206 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2803): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing hbase:meta,,1.1588230740
2024-12-03T13:08:51,206 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1d703b8dd1a7ffa0786db6127714a68b/recovered.edits/40.seqid, newMaxSeqId=40, maxSeqId=1
2024-12-03T13:08:51,210 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733231282899.1d703b8dd1a7ffa0786db6127714a68b.
2024-12-03T13:08:51,211 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 1d703b8dd1a7ffa0786db6127714a68b:
2024-12-03T13:08:51,211 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733231282899.1d703b8dd1a7ffa0786db6127714a68b.
2024-12-03T13:08:51,211 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 641f7de0aec6dadb323be03e384e3016, disabling compactions & flushes
2024-12-03T13:08:51,211 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733231281265.641f7de0aec6dadb323be03e384e3016.
2024-12-03T13:08:51,211 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733231281265.641f7de0aec6dadb323be03e384e3016.
2024-12-03T13:08:51,211 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733231281265.641f7de0aec6dadb323be03e384e3016. after waiting 0 ms
2024-12-03T13:08:51,211 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733231281265.641f7de0aec6dadb323be03e384e3016.
2024-12-03T13:08:51,211 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 641f7de0aec6dadb323be03e384e3016:
2024-12-03T13:08:51,211 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2803): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing hbase:namespace,,1733231281265.641f7de0aec6dadb323be03e384e3016.
2024-12-03T13:08:51,307 INFO [regionserver/35946657df09:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-12-03T13:08:51,307 INFO [regionserver/35946657df09:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-12-03T13:08:51,405 INFO [RS:0;35946657df09:42823 {}] regionserver.HRegionServer(3579): Received CLOSE for 641f7de0aec6dadb323be03e384e3016
2024-12-03T13:08:51,406 DEBUG [RS:0;35946657df09:42823 {}] regionserver.HRegionServer(1629): Waiting on 641f7de0aec6dadb323be03e384e3016
2024-12-03T13:08:51,406 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 641f7de0aec6dadb323be03e384e3016, disabling compactions & flushes
2024-12-03T13:08:51,406 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733231281265.641f7de0aec6dadb323be03e384e3016.
2024-12-03T13:08:51,406 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733231281265.641f7de0aec6dadb323be03e384e3016.
2024-12-03T13:08:51,406 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733231281265.641f7de0aec6dadb323be03e384e3016. after waiting 0 ms
2024-12-03T13:08:51,406 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733231281265.641f7de0aec6dadb323be03e384e3016.
2024-12-03T13:08:51,406 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 641f7de0aec6dadb323be03e384e3016:
2024-12-03T13:08:51,406 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2803): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing hbase:namespace,,1733231281265.641f7de0aec6dadb323be03e384e3016.
2024-12-03T13:08:51,578 INFO [regionserver/35946657df09:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-03T13:08:51,606 INFO [RS:0;35946657df09:42823 {}] regionserver.HRegionServer(1624): We were exiting though online regions are not empty, because some regions failed closing
2024-12-03T13:08:51,606 INFO [RS:0;35946657df09:42823 {}] regionserver.HRegionServer(1250): stopping server 35946657df09,42823,1733231279724; all regions closed.
2024-12-03T13:08:51,606 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724
2024-12-03T13:08:51,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41515 is added to blk_1073741868_1053 (size=93)
2024-12-03T13:08:51,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37071 is added to blk_1073741868_1053 (size=93)
2024-12-03T13:08:52,261 INFO [regionserver/35946657df09:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-03T13:08:52,813 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath
2024-12-03T13:08:52,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37071 is added to blk_1073741863_1048 (size=1618)
2024-12-03T13:08:55,084 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta after 4003ms
2024-12-03T13:08:56,212 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:08:56,228 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:08:56,231 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:08:56,231 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:08:56,240 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:08:56,240 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:08:56,609 ERROR [WAL-Shutdown-0 {}] wal.FSHLog(508): We have waited 5 seconds but the close of writer(s) doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds"
2024-12-03T13:08:56,610 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724
2024-12-03T13:08:56,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37071 is added to blk_1073741865_1050 (size=13514)
2024-12-03T13:08:56,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41515 is added to blk_1073741865_1050 (size=13514)
2024-12-03T13:08:56,743 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-03T13:08:56,745 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:08:56,767 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:08:56,768 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:08:56,768 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:08:56,783 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:08:56,784 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:08:59,484 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-03T13:09:02,014 ERROR [WAL-Shutdown-0 {}] wal.FSHLog(508): We have waited 5 seconds but the close of writer(s) doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds"
2024-12-03T13:09:02,014 DEBUG [RS:0;35946657df09:42823 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-03T13:09:02,014 INFO [RS:0;35946657df09:42823 {}] regionserver.LeaseManager(133): Closed leases
2024-12-03T13:09:02,136 INFO [RS:0;35946657df09:42823 {}] hbase.ChoreService(370): Chore service for: regionserver/35946657df09:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-12-03T13:09:02,137 INFO [RS:0;35946657df09:42823 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:42823
2024-12-03T13:09:02,138 INFO [regionserver/35946657df09:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-03T13:09:02,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-03T13:09:02,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42823-0x1019c9820000001, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/35946657df09,42823,1733231279724
2024-12-03T13:09:02,366 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [35946657df09,42823,1733231279724]
2024-12-03T13:09:02,366 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 35946657df09,42823,1733231279724; numProcessing=2
2024-12-03T13:09:02,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42823-0x1019c9820000001, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T13:09:02,466 INFO [RS:0;35946657df09:42823 {}] regionserver.HRegionServer(1307): Exiting; stopping=35946657df09,42823,1733231279724; zookeeper connection closed.
2024-12-03T13:09:02,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42823-0x1019c9820000001, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T13:09:02,474 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@f87d50e {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@f87d50e
2024-12-03T13:09:02,475 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete
2024-12-03T13:09:02,576 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/35946657df09,42823,1733231279724 already deleted, retry=false
2024-12-03T13:09:02,576 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 35946657df09,42823,1733231279724 expired; onlineServers=0
2024-12-03T13:09:02,576 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '35946657df09,40851,1733231279554' *****
2024-12-03T13:09:02,576 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-03T13:09:02,577 DEBUG [M:0;35946657df09:40851 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c0e5b32, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=35946657df09/172.17.0.2:0
2024-12-03T13:09:02,577 INFO [M:0;35946657df09:40851 {}] regionserver.HRegionServer(1224): stopping server 35946657df09,40851,1733231279554
2024-12-03T13:09:02,577 INFO [M:0;35946657df09:40851 {}] regionserver.HRegionServer(1250): stopping server 35946657df09,40851,1733231279554; all regions closed.
2024-12-03T13:09:02,577 DEBUG [M:0;35946657df09:40851 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-03T13:09:02,577 DEBUG [M:0;35946657df09:40851 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-03T13:09:02,577 DEBUG [M:0;35946657df09:40851 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-03T13:09:02,577 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-03T13:09:02,577 DEBUG [master/35946657df09:0:becomeActiveMaster-HFileCleaner.small.0-1733231280136 {}] cleaner.HFileCleaner(306): Exit Thread[master/35946657df09:0:becomeActiveMaster-HFileCleaner.small.0-1733231280136,5,FailOnTimeoutGroup]
2024-12-03T13:09:02,577 INFO [M:0;35946657df09:40851 {}] hbase.ChoreService(370): Chore service for: master/35946657df09:0 had [] on shutdown
2024-12-03T13:09:02,577 DEBUG [master/35946657df09:0:becomeActiveMaster-HFileCleaner.large.0-1733231280136 {}] cleaner.HFileCleaner(306): Exit Thread[master/35946657df09:0:becomeActiveMaster-HFileCleaner.large.0-1733231280136,5,FailOnTimeoutGroup]
2024-12-03T13:09:02,577 DEBUG [M:0;35946657df09:40851 {}] master.HMaster(1733): Stopping service threads
2024-12-03T13:09:02,577 INFO [M:0;35946657df09:40851 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-03T13:09:02,578 INFO [M:0;35946657df09:40851 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-03T13:09:02,578 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-03T13:09:02,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-03T13:09:02,650 DEBUG [M:0;35946657df09:40851 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/master already deleted, retry=false
2024-12-03T13:09:02,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:09:02,650 DEBUG [M:0;35946657df09:40851 {}] master.ActiveMasterManager(353): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master
2024-12-03T13:09:02,650 INFO [M:0;35946657df09:40851 {}] assignment.AssignmentManager(391): Stopping assignment manager
2024-12-03T13:09:02,650 INFO [M:0;35946657df09:40851 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-03T13:09:02,650 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-03T13:09:02,650 DEBUG [M:0;35946657df09:40851 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-03T13:09:02,650 INFO [M:0;35946657df09:40851 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T13:09:02,650 DEBUG [M:0;35946657df09:40851 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T13:09:02,650 DEBUG [M:0;35946657df09:40851 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-03T13:09:02,650 DEBUG [M:0;35946657df09:40851 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T13:09:02,651 INFO [M:0;35946657df09:40851 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=40.08 KB heapSize=49.29 KB
2024-12-03T13:09:02,653 WARN [sync.4 {}] wal.FSHLog(750): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41515,DS-790ef67b-1517-47df-a8ec-7b49ccbc92f8,DISK]]
2024-12-03T13:09:02,653 WARN [sync.4 {}] wal.FSHLog(721): Requesting log roll because of low replication, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41515,DS-790ef67b-1517-47df-a8ec-7b49ccbc92f8,DISK]]
2024-12-03T13:09:02,653 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(197): WAL FSHLog 35946657df09%2C40851%2C1733231279554:(num 1733231310135) roll requested
2024-12-03T13:09:02,653 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C40851%2C1733231279554.1733231342653
2024-12-03T13:09:02,679 DEBUG [M:0;35946657df09:40851 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c72071096c2a4efcaef2584141b6c85d is 82, key is hbase:meta,,1/info:regioninfo/1733231281063/Put/seqid=0
2024-12-03T13:09:02,702 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231310135 with entries=1, filesize=349 B; new WAL /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231342653
2024-12-03T13:09:02,703 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33439:33439),(127.0.0.1/127.0.0.1:34309:34309)]
2024-12-03T13:09:02,703 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(751): hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 is not closed yet, will try archiving it next time
2024-12-03T13:09:02,703 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(751): hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231310135 is not closed yet, will try archiving it next time
2024-12-03T13:09:02,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37071 is added to blk_1073741872_1058 (size=5672)
2024-12-03T13:09:02,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41515 is added to blk_1073741872_1058 (size=5672)
2024-12-03T13:09:02,718 INFO [M:0;35946657df09:40851 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c72071096c2a4efcaef2584141b6c85d
2024-12-03T13:09:02,731 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:02,770 DEBUG [M:0;35946657df09:40851 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/624d5fc1ae784f999cb3d14e9cac8e7d is 774, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1733231283463/Put/seqid=0
2024-12-03T13:09:02,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41515 is added to blk_1073741873_1059 (size=7465)
2024-12-03T13:09:02,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37071 is added to blk_1073741873_1059 (size=7465)
2024-12-03T13:09:02,819 INFO [M:0;35946657df09:40851 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=39.41 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/624d5fc1ae784f999cb3d14e9cac8e7d
2024-12-03T13:09:02,871 DEBUG [M:0;35946657df09:40851 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6ebe1cab0abb4e3097c52dde8bc6f43b is 69, key is 35946657df09,42823,1733231279724/rs:state/1733231280220/Put/seqid=0
2024-12-03T13:09:02,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41515 is added to blk_1073741874_1060 (size=5224)
2024-12-03T13:09:02,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37071 is added to blk_1073741874_1060 (size=5224)
2024-12-03T13:09:02,891 INFO [M:0;35946657df09:40851 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6ebe1cab0abb4e3097c52dde8bc6f43b
2024-12-03T13:09:02,938 DEBUG [M:0;35946657df09:40851 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/341ad203dac34b85b180211f72cda2d1 is 52, key is load_balancer_on/state:d/1733231282686/Put/seqid=0
2024-12-03T13:09:02,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41515 is added to blk_1073741875_1061 (size=5056)
2024-12-03T13:09:02,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37071 is added to blk_1073741875_1061 (size=5056)
2024-12-03T13:09:02,986 INFO [M:0;35946657df09:40851 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/341ad203dac34b85b180211f72cda2d1
2024-12-03T13:09:03,005 DEBUG [M:0;35946657df09:40851 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c72071096c2a4efcaef2584141b6c85d as hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c72071096c2a4efcaef2584141b6c85d
2024-12-03T13:09:03,018 INFO [M:0;35946657df09:40851 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c72071096c2a4efcaef2584141b6c85d, entries=8, sequenceid=97, filesize=5.5 K
2024-12-03T13:09:03,022 DEBUG [M:0;35946657df09:40851 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/624d5fc1ae784f999cb3d14e9cac8e7d as hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/624d5fc1ae784f999cb3d14e9cac8e7d
2024-12-03T13:09:03,038 INFO [M:0;35946657df09:40851 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/624d5fc1ae784f999cb3d14e9cac8e7d, entries=11, sequenceid=97, filesize=7.3 K
2024-12-03T13:09:03,041 DEBUG [M:0;35946657df09:40851 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6ebe1cab0abb4e3097c52dde8bc6f43b as hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6ebe1cab0abb4e3097c52dde8bc6f43b
2024-12-03T13:09:03,059 INFO [M:0;35946657df09:40851 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6ebe1cab0abb4e3097c52dde8bc6f43b, entries=2, sequenceid=97, filesize=5.1 K
2024-12-03T13:09:03,063 DEBUG [M:0;35946657df09:40851 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/341ad203dac34b85b180211f72cda2d1 as hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/341ad203dac34b85b180211f72cda2d1
2024-12-03T13:09:03,073 INFO [M:0;35946657df09:40851 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/341ad203dac34b85b180211f72cda2d1, entries=1, sequenceid=97, filesize=4.9 K
2024-12-03T13:09:03,076 INFO [M:0;35946657df09:40851 {}] regionserver.HRegion(3040): Finished flush of dataSize ~40.08 KB/41039, heapSize ~49.23 KB/50408, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 425ms, sequenceid=97, compaction requested=false
2024-12-03T13:09:03,079 INFO [M:0;35946657df09:40851 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
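The flush sequence above follows HBase's two-phase pattern: each store's HFile is first written under the region's .tmp directory, then committed by a rename into the column-family directory, so readers never observe a half-written file. A minimal sketch of that tmp-then-rename idiom using the plain Hadoop FileSystem API (hypothetical paths and payload; an illustration, not HBase's HRegionFileSystem code):

```java
// Sketch of the write-to-.tmp-then-rename commit idiom seen in the log above.
// Paths and payload are hypothetical; this is not HBase's actual flush code.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path tmp = new Path("/store/.tmp/c72071096c2a4efcaef2584141b6c85d");
    Path dst = new Path("/store/info/c72071096c2a4efcaef2584141b6c85d");
    try (FSDataOutputStream out = fs.create(tmp)) {
      out.writeBytes("flushed cells would go here"); // stand-in for HFile contents
    }
    fs.mkdirs(dst.getParent());
    // The commit itself is a rename; on HDFS this is a metadata-only operation,
    // which is what keeps the "Committing ... as ..." step cheap.
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("commit failed for " + tmp);
    }
  }
}
```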
2024-12-03T13:09:03,079 DEBUG [M:0;35946657df09:40851 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-03T13:09:03,082 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554
2024-12-03T13:09:03,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37071 is added to blk_1073741871_1057 (size=493)
2024-12-03T13:09:03,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41515 is added to blk_1073741871_1057 (size=493)
2024-12-03T13:09:03,112 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(751): hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 is not closed yet, will try archiving it next time
2024-12-03T13:09:03,114 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231310135 to hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/oldWALs/35946657df09%2C40851%2C1733231279554.1733231310135
2024-12-03T13:09:03,136 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/oldWALs/35946657df09%2C40851%2C1733231279554.1733231310135 to hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/oldWALs/35946657df09%2C40851%2C1733231279554.1733231310135$masterlocalwal$
2024-12-03T13:09:03,138 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 13:09:02,731 occurrence above; caused by java.io.IOException: Filesystem closed)
2024-12-03T13:09:03,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 13:09:02,731 occurrence above; caused by java.io.IOException: Filesystem closed)
2024-12-03T13:09:04,140 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 13:09:02,731 occurrence above; caused by java.io.IOException: Filesystem closed)
2024-12-03T13:09:04,733 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 13:09:02,731 occurrence above; caused by java.io.IOException: Filesystem closed)
2024-12-03T13:09:04,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37071 is added to blk_1073741848_1032 (size=357)
2024-12-03T13:09:05,141 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 13:09:02,731 occurrence above; caused by java.io.IOException: Filesystem closed)
2024-12-03T13:09:05,733 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 13:09:02,731 occurrence above; caused by java.io.IOException: Filesystem closed)
2024-12-03T13:09:06,142 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 13:09:02,731 occurrence above; caused by java.io.IOException: Filesystem closed)
2024-12-03T13:09:06,734 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 13:09:02,731 occurrence above; caused by java.io.IOException: Filesystem closed)
2024-12-03T13:09:07,143 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 13:09:02,731 occurrence above; caused by java.io.IOException: Filesystem closed)
2024-12-03T13:09:07,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 13:09:02,731 occurrence above; caused by java.io.IOException: Filesystem closed)
2024-12-03T13:09:08,090 ERROR [WAL-Shutdown-0 {}] wal.FSHLog(508): We have waited 5 seconds but the close of writer(s) doesn't complete. Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds"
2024-12-03T13:09:08,090 INFO [M:0;35946657df09:40851 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-12-03T13:09:08,090 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
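The ERROR above names the knob that bounds this wait: after 5 seconds of unfinished writer close-outs the shutdown gives up. A hedged sketch of raising it programmatically (assuming the property is read as whole seconds; in a real deployment the same key would normally be set in hbase-site.xml):

```java
// Sketch: raising the FSHLog shutdown wait named in the ERROR above.
// Assumes the property takes whole seconds; verify against your HBase version.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalShutdownWaitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // The run above used the default wait (it gave up after 5 seconds).
    conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30);
    System.out.println(conf.get("hbase.wal.fshlog.wait.on.shutdown.seconds"));
  }
}
```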
2024-12-03T13:09:08,091 INFO [M:0;35946657df09:40851 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:40851
2024-12-03T13:09:08,144 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 13:09:02,731 occurrence above; caused by java.io.IOException: Filesystem closed)
2024-12-03T13:09:08,149 DEBUG [M:0;35946657df09:40851 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/35946657df09,40851,1733231279554 already deleted, retry=false
2024-12-03T13:09:08,258 INFO [M:0;35946657df09:40851 {}] regionserver.HRegionServer(1307): Exiting; stopping=35946657df09,40851,1733231279554; zookeeper connection closed.
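Every one of the retries above fails the same way: RecoverLeaseFSUtils reflectively probes DistributedFileSystem.isFileClosed, but the test has already closed the underlying client, so DFSClient.checkOpen throws "Filesystem closed" on each attempt. A minimal reproduction of that failure mode (hypothetical file path; assumes the HDFS client libraries shown in the trace are on the classpath):

```java
// Sketch: why isFileClosed() throws "Filesystem closed" once the client is closed,
// matching the repeated Caused-by in the WARNs above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class FsClosedSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:42201"); // NameNode address from this log
    FileSystem fs = FileSystem.get(conf);
    fs.close(); // after this, every call fails DFSClient.checkOpen()
    try {
      ((DistributedFileSystem) fs).isFileClosed(new Path("/some/wal")); // hypothetical path
    } catch (java.io.IOException e) {
      System.out.println(e.getMessage()); // prints "Filesystem closed"
    }
  }
}
```

The lease-recovery loop keeps retrying against the same dead client, which is why the identical trace recurs roughly every half second until the shutdown timeout fires.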
2024-12-03T13:09:08,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T13:09:08,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40851-0x1019c9820000000, quorum=127.0.0.1:53253, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T13:09:08,293 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@487dde88{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T13:09:08,294 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3853f51f{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-03T13:09:08,294 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-03T13:09:08,295 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@638be665{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-03T13:09:08,295 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18fac2c5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/hadoop.log.dir/,STOPPED}
2024-12-03T13:09:08,299 WARN [BP-1901943026-172.17.0.2-1733231277769 heartbeating to localhost/127.0.0.1:42201 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-03T13:09:08,299 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
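The two ZKWatcher records above are connection-state notifications: ZooKeeper events with type=None carry no znode path and report session state, here Closed after the master dropped its connection. A small sketch of a watcher that surfaces such events, using the stock ZooKeeper client API (quorum address taken from the log; this is not HBase's ZKWatcher itself):

```java
// Sketch: receiving connection-state events (type=None) on a ZooKeeper watcher,
// in the style of the ZKWatcher records above. Assumes a quorum at 127.0.0.1:53253.
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class StateWatcherSketch {
  public static void main(String[] args) throws Exception {
    Watcher watcher = (WatchedEvent event) ->
        System.out.println("Received ZooKeeper Event, type=" + event.getType()
            + ", state=" + event.getState() + ", path=" + event.getPath());
    ZooKeeper zk = new ZooKeeper("127.0.0.1:53253", 30000, watcher);
    zk.close(); // a final state notification (e.g. state=Closed) reaches the watcher
  }
}
```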
2024-12-03T13:09:08,299 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-03T13:09:08,299 WARN [BP-1901943026-172.17.0.2-1733231277769 heartbeating to localhost/127.0.0.1:42201 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1901943026-172.17.0.2-1733231277769 (Datanode Uuid bb5ffb4e-0b04-4306-9aa6-d2b870e4faf7) service to localhost/127.0.0.1:42201
2024-12-03T13:09:08,300 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data3/current/BP-1901943026-172.17.0.2-1733231277769 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T13:09:08,300 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data4/current/BP-1901943026-172.17.0.2-1733231277769 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T13:09:08,300 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-03T13:09:08,311 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3ece3dae{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T13:09:08,312 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3884b926{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-03T13:09:08,312 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-03T13:09:08,312 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10ca804b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-03T13:09:08,313 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e1dd1d3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/hadoop.log.dir/,STOPPED}
2024-12-03T13:09:08,315 WARN [BP-1901943026-172.17.0.2-1733231277769 heartbeating to localhost/127.0.0.1:42201 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-03T13:09:08,315 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-03T13:09:08,315 WARN [BP-1901943026-172.17.0.2-1733231277769 heartbeating to localhost/127.0.0.1:42201 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1901943026-172.17.0.2-1733231277769 (Datanode Uuid 991a1b7e-cb01-4d7a-bd53-a03c488bce89) service to localhost/127.0.0.1:42201
2024-12-03T13:09:08,315 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-03T13:09:08,316 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data5/current/BP-1901943026-172.17.0.2-1733231277769 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T13:09:08,316 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/cluster_e92a6ccc-83e3-94a5-fa85-df2096793b7d/dfs/data/data6/current/BP-1901943026-172.17.0.2-1733231277769 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T13:09:08,316 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-03T13:09:08,334 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@38c1fd10{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-03T13:09:08,335 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@613da372{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-03T13:09:08,335 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-03T13:09:08,336 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@23d7b823{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-03T13:09:08,336 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59c96799{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/hadoop.log.dir/,STOPPED}
2024-12-03T13:09:08,352 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-12-03T13:09:08,407 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
2024-12-03T13:09:08,418 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=86 (was 64)
Potentially hanging thread: IPC Client (709188109) connection to localhost/127.0.0.1:42201 from jenkins.hfs.1
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-11-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: Timer for 'DataNode' metrics system
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Potentially hanging thread: RS-EventLoopGroup-7-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RS-EventLoopGroup-5-2 (stack identical to RS-EventLoopGroup-7-2 above)
Potentially hanging thread: RS-EventLoopGroup-5-3 (stack identical to RS-EventLoopGroup-7-2 above)
Potentially hanging thread: IPC Client (709188109) connection to localhost/127.0.0.1:42201 from jenkins (stack identical to the jenkins.hfs.1 IPC Client above)
Potentially hanging thread: RS-EventLoopGroup-7-3 (stack identical to RS-EventLoopGroup-7-2 above)
Potentially hanging thread: IPC Client (709188109) connection to localhost/127.0.0.1:42201 from jenkins (stack identical to the jenkins.hfs.1 IPC Client above)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:42201
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RS-EventLoopGroup-6-3 (stack identical to RS-EventLoopGroup-7-2 above)
Potentially hanging thread: nioEventLoopGroup-10-3 (stack identical to nioEventLoopGroup-11-1 above)
Potentially hanging thread: nioEventLoopGroup-11-2 (stack identical to nioEventLoopGroup-11-1 above)
Potentially hanging thread: nioEventLoopGroup-17-2 (stack identical to nioEventLoopGroup-11-1 above)
Potentially hanging thread: Close-WAL-Writer-0
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$$Lambda$796/0x00007ff724b87210.run(Unknown Source)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-16-1 (stack identical to nioEventLoopGroup-11-1 above)
Potentially hanging thread: nioEventLoopGroup-16-2 (stack identical to nioEventLoopGroup-11-1 above)
Potentially hanging thread: RS-EventLoopGroup-7-1 (stack identical to RS-EventLoopGroup-7-2 above)
Potentially hanging thread: nioEventLoopGroup-10-1 (stack identical to nioEventLoopGroup-11-1 above)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-16-3 (stack identical to nioEventLoopGroup-11-1 above)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:42201 (stack identical to LeaseRenewer:jenkins@localhost:42201 above)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42201
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42201 (stack identical to the previous IPC Parameter Sending Thread)
Potentially hanging thread: nioEventLoopGroup-10-2 (stack identical to nioEventLoopGroup-11-1 above)
Potentially hanging thread: RS-EventLoopGroup-6-2 (stack identical to RS-EventLoopGroup-7-2 above)
Potentially hanging thread: Close-WAL-Writer-0 (stack identical to the first Close-WAL-Writer-0 above)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:42201 (stack identical to LeaseRenewer:jenkins@localhost:42201 above)
Potentially hanging thread: nioEventLoopGroup-17-1 (stack identical to nioEventLoopGroup-11-1 above)
Potentially hanging thread: nioEventLoopGroup-11-3 (stack identical to nioEventLoopGroup-11-1 above)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42201 (stack identical to the previous IPC Parameter Sending Thread)
Potentially hanging thread: RS-EventLoopGroup-6-1 (stack identical to RS-EventLoopGroup-7-2 above)
Potentially hanging thread: Close-WAL-Writer-0 (stack identical to the first Close-WAL-Writer-0 above)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 (stack identical to RPCClient-NioEventLoopGroup-4-4 above)
Potentially hanging thread: RS-EventLoopGroup-5-1 (stack identical to RS-EventLoopGroup-7-2 above)
Potentially hanging thread: Abort regionserver monitor (stack identical to Timer for 'DataNode' metrics system above)
Potentially hanging thread: nioEventLoopGroup-17-3 (stack identical to nioEventLoopGroup-11-1 above)
 - Thread LEAK? -, OpenFileDescriptor=410 (was 403) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1011 (was 873) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=253 (was 1647)
2024-12-03T13:09:08,428 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=86, OpenFileDescriptor=410, MaxFileDescriptor=1048576, SystemLoadAverage=1011, ProcessCount=11, AvailableMemoryMB=253
2024-12-03T13:09:08,428 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-03T13:09:08,428 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/hadoop.log.dir so I do NOT create it in target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0
2024-12-03T13:09:08,428 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/aad3bd7b-ca96-4c92-6ccd-9b60de858ad2/hadoop.tmp.dir so I do NOT create it in target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0
2024-12-03T13:09:08,429 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/cluster_470490ff-aee2-6649-080a-b130b0aab159, deleteOnExit=true
2024-12-03T13:09:08,429 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS
2024-12-03T13:09:08,429 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/test.cache.data in system properties and HBase conf
2024-12-03T13:09:08,429 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/hadoop.tmp.dir in system properties and HBase conf
2024-12-03T13:09:08,429 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/hadoop.log.dir in system properties and HBase conf
2024-12-03T13:09:08,429 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-03T13:09:08,429 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-03T13:09:08,429 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF
2024-12-03T13:09:08,430 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem.
Skipping on block location reordering 2024-12-03T13:09:08,430 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-03T13:09:08,430 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-03T13:09:08,430 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-03T13:09:08,430 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T13:09:08,430 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-03T13:09:08,430 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-03T13:09:08,431 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T13:09:08,431 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T13:09:08,431 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-03T13:09:08,431 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/nfs.dump.dir in system properties and HBase conf 2024-12-03T13:09:08,431 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/java.io.tmpdir in system properties and HBase conf 2024-12-03T13:09:08,431 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T13:09:08,431 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-03T13:09:08,431 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-03T13:09:08,447 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T13:09:08,599 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:09:08,736 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:09:09,144 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:09:09,372 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T13:09:09,384 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T13:09:09,411 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T13:09:09,411 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T13:09:09,411 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T13:09:09,419 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T13:09:09,421 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@505e56c6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/hadoop.log.dir/,AVAILABLE} 2024-12-03T13:09:09,425 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@423321b0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T13:09:09,563 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@645d76fa{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/java.io.tmpdir/jetty-localhost-44255-hadoop-hdfs-3_4_1-tests_jar-_-any-17436332028274886366/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T13:09:09,568 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@66f02920{HTTP/1.1, (http/1.1)}{localhost:44255} 2024-12-03T13:09:09,568 INFO [Time-limited test {}] server.Server(415): Started @195333ms 2024-12-03T13:09:09,589 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 
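[Editor's note: the mini-cluster being brought up above (StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1, ...}) is the standard HBaseTestingUtility flow. A minimal sketch of how a test typically starts and stops such a cluster, assuming the HBase 2.x testing API; the class name MiniClusterSketch and the elided test body are illustrative, not taken from this test:]

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        // Mirror the option set logged above: 1 master, 1 region server,
        // 2 datanodes, 1 ZooKeeper server, no pre-created root/WAL dirs.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option); // starts DFS, ZK, master and region server
        try {
          // ... test body runs against util.getConnection() ...
        } finally {
          util.shutdownMiniCluster(); // tears down the cluster and temp dirs
        }
      }
    }

[The hadoop.log.dir / hadoop.tmp.dir / java.io.tmpdir records above are the side effects of startMiniCluster(): the utility rewrites those system properties to point into target/test-data before launching the embedded DFS and Jetty web UIs.]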
2024-12-03T13:09:09,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:09:09,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:09:09,825 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T13:09:09,830 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T13:09:09,835 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T13:09:09,835 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T13:09:09,835 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T13:09:09,836 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1834f399{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/hadoop.log.dir/,AVAILABLE} 2024-12-03T13:09:09,836 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@479990dd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T13:09:09,979 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@771ac3f9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/java.io.tmpdir/jetty-localhost-37101-hadoop-hdfs-3_4_1-tests_jar-_-any-16652496064636319579/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T13:09:09,980 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@69943f9a{HTTP/1.1, (http/1.1)}{localhost:37101} 2024-12-03T13:09:09,980 INFO [Time-limited test {}] server.Server(415): Started @195745ms 2024-12-03T13:09:09,981 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T13:09:10,055 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T13:09:10,061 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T13:09:10,065 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T13:09:10,065 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T13:09:10,065 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T13:09:10,067 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5380ff88{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/hadoop.log.dir/,AVAILABLE} 2024-12-03T13:09:10,067 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@9a6fe07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T13:09:10,145 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:09:10,205 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@680d18a8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/java.io.tmpdir/jetty-localhost-44361-hadoop-hdfs-3_4_1-tests_jar-_-any-2708202169802598916/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T13:09:10,206 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@69fed8b0{HTTP/1.1, (http/1.1)}{localhost:44361} 2024-12-03T13:09:10,206 INFO [Time-limited test {}] server.Server(415): Started @195971ms 2024-12-03T13:09:10,208 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T13:09:10,569 WARN [Thread-965 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/cluster_470490ff-aee2-6649-080a-b130b0aab159/dfs/data/data2/current/BP-882385131-172.17.0.2-1733231348469/current, will proceed with Du for space computation calculation, 2024-12-03T13:09:10,569 WARN [Thread-964 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/cluster_470490ff-aee2-6649-080a-b130b0aab159/dfs/data/data1/current/BP-882385131-172.17.0.2-1733231348469/current, will proceed with Du for space computation calculation, 2024-12-03T13:09:10,611 WARN [Thread-930 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T13:09:10,623 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb86ef065eda6b358 with lease ID 0x1f64fc83ac0f8a0: Processing first storage report for DS-5c600535-856b-41e9-bebc-e9043dadc7da from datanode DatanodeRegistration(127.0.0.1:34741, datanodeUuid=538fb0ce-d0e0-4ce9-a623-330626998870, infoPort=40723, infoSecurePort=0, ipcPort=37033, storageInfo=lv=-57;cid=testClusterID;nsid=1663561927;c=1733231348469) 2024-12-03T13:09:10,623 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb86ef065eda6b358 with lease ID 0x1f64fc83ac0f8a0: from storage DS-5c600535-856b-41e9-bebc-e9043dadc7da node DatanodeRegistration(127.0.0.1:34741, datanodeUuid=538fb0ce-d0e0-4ce9-a623-330626998870, infoPort=40723, infoSecurePort=0, ipcPort=37033, storageInfo=lv=-57;cid=testClusterID;nsid=1663561927;c=1733231348469), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T13:09:10,624 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb86ef065eda6b358 with lease ID 0x1f64fc83ac0f8a0: Processing first storage report for DS-4b560d87-e57a-4c6f-87a1-d63072588613 from datanode DatanodeRegistration(127.0.0.1:34741, datanodeUuid=538fb0ce-d0e0-4ce9-a623-330626998870, infoPort=40723, infoSecurePort=0, ipcPort=37033, storageInfo=lv=-57;cid=testClusterID;nsid=1663561927;c=1733231348469) 2024-12-03T13:09:10,624 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb86ef065eda6b358 with lease ID 0x1f64fc83ac0f8a0: from storage DS-4b560d87-e57a-4c6f-87a1-d63072588613 node DatanodeRegistration(127.0.0.1:34741, datanodeUuid=538fb0ce-d0e0-4ce9-a623-330626998870, infoPort=40723, infoSecurePort=0, ipcPort=37033, storageInfo=lv=-57;cid=testClusterID;nsid=1663561927;c=1733231348469), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T13:09:10,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:09:10,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:09:10,851 WARN [Thread-977 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/cluster_470490ff-aee2-6649-080a-b130b0aab159/dfs/data/data3/current/BP-882385131-172.17.0.2-1733231348469/current, will proceed with Du for space computation calculation, 2024-12-03T13:09:10,852 WARN [Thread-978 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/cluster_470490ff-aee2-6649-080a-b130b0aab159/dfs/data/data4/current/BP-882385131-172.17.0.2-1733231348469/current, will proceed with Du for space computation calculation, 2024-12-03T13:09:10,895 WARN [Thread-954 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T13:09:10,906 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9e0c1ea3a56d8f34 with lease ID 0x1f64fc83ac0f8a1: Processing first storage report for DS-3c6ad230-b071-49f3-ab64-82aefef2e784 from datanode DatanodeRegistration(127.0.0.1:33873, datanodeUuid=57a0dfc6-4909-418a-97b3-c4a00cee3b9b, infoPort=39439, infoSecurePort=0, ipcPort=32903, storageInfo=lv=-57;cid=testClusterID;nsid=1663561927;c=1733231348469) 2024-12-03T13:09:10,906 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9e0c1ea3a56d8f34 with lease ID 0x1f64fc83ac0f8a1: from storage DS-3c6ad230-b071-49f3-ab64-82aefef2e784 node DatanodeRegistration(127.0.0.1:33873, datanodeUuid=57a0dfc6-4909-418a-97b3-c4a00cee3b9b, infoPort=39439, infoSecurePort=0, ipcPort=32903, storageInfo=lv=-57;cid=testClusterID;nsid=1663561927;c=1733231348469), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T13:09:10,907 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9e0c1ea3a56d8f34 with lease ID 0x1f64fc83ac0f8a1: Processing first storage report for DS-6f394419-cde1-4e3e-8693-b7dbb67643d7 from datanode DatanodeRegistration(127.0.0.1:33873, datanodeUuid=57a0dfc6-4909-418a-97b3-c4a00cee3b9b, infoPort=39439, infoSecurePort=0, ipcPort=32903, storageInfo=lv=-57;cid=testClusterID;nsid=1663561927;c=1733231348469) 2024-12-03T13:09:10,907 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9e0c1ea3a56d8f34 with lease ID 0x1f64fc83ac0f8a1: from storage DS-6f394419-cde1-4e3e-8693-b7dbb67643d7 node DatanodeRegistration(127.0.0.1:33873, datanodeUuid=57a0dfc6-4909-418a-97b3-c4a00cee3b9b, infoPort=39439, infoSecurePort=0, ipcPort=32903, storageInfo=lv=-57;cid=testClusterID;nsid=1663561927;c=1733231348469), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T13:09:10,990 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0 2024-12-03T13:09:11,007 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/cluster_470490ff-aee2-6649-080a-b130b0aab159/zookeeper_0, clientPort=58594, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/cluster_470490ff-aee2-6649-080a-b130b0aab159/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/cluster_470490ff-aee2-6649-080a-b130b0aab159/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-03T13:09:11,011 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=58594 2024-12-03T13:09:11,011 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T13:09:11,013 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T13:09:11,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33873 is added to blk_1073741825_1001 (size=7) 2024-12-03T13:09:11,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34741 is added to blk_1073741825_1001 (size=7) 2024-12-03T13:09:11,046 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b with version=8 2024-12-03T13:09:11,046 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/hbase-staging 2024-12-03T13:09:11,049 INFO [Time-limited test {}] client.ConnectionUtils(129): master/35946657df09:0 server-side Connection retries=45 2024-12-03T13:09:11,049 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T13:09:11,049 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T13:09:11,049 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T13:09:11,049 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T13:09:11,049 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T13:09:11,049 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T13:09:11,050 INFO [Time-limited 
test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T13:09:11,050 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:39675 2024-12-03T13:09:11,051 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T13:09:11,053 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T13:09:11,056 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:39675 connecting to ZooKeeper ensemble=127.0.0.1:58594 2024-12-03T13:09:11,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:396750x0, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T13:09:11,131 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39675-0x1019c99374d0000 connected 2024-12-03T13:09:11,146 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:09:11,228 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T13:09:11,247 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T13:09:11,255 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T13:09:11,264 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39675 2024-12-03T13:09:11,266 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39675 2024-12-03T13:09:11,270 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39675 2024-12-03T13:09:11,276 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39675 2024-12-03T13:09:11,282 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39675 2024-12-03T13:09:11,283 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b, hbase.cluster.distributed=false 2024-12-03T13:09:11,325 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/35946657df09:0 server-side Connection retries=45 2024-12-03T13:09:11,325 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T13:09:11,326 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T13:09:11,326 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T13:09:11,326 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T13:09:11,326 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T13:09:11,326 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T13:09:11,326 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T13:09:11,329 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:41721 2024-12-03T13:09:11,331 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 
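[Editor's note: the recurring "Failed invocation ... InvocationTargetException ... Filesystem closed" WARN comes from RecoverLeaseFSUtils probing DistributedFileSystem.isFileClosed through reflection inside its lease-recovery retry loop; once the previous mini-cluster's DFSClient has been shut down, every probe throws InvocationTargetException wrapping IOException("Filesystem closed"), and the Close-WAL-Writer-0 thread sleeps and retries, which is also why it shows up in the hanging-thread dump. A sketch of the reflective-probe pattern under those assumptions; this is not HBase's exact code:]

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class IsFileClosedProbe {
      // Probe isFileClosed reflectively so the caller also links against
      // FileSystem implementations that do not expose the method. A false
      // return leaves the recovery loop to sleep and retry, which is what
      // produces the repeated WARNs above once the DFSClient is closed.
      static boolean isFileClosed(FileSystem fs, Path path) {
        try {
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, path);
        } catch (NoSuchMethodException e) {
          return false; // method absent on this FileSystem implementation
        } catch (IllegalAccessException | InvocationTargetException e) {
          // InvocationTargetException here wraps the underlying
          // IOException("Filesystem closed") seen in the traces
          return false;
        }
      }
    }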
2024-12-03T13:09:11,339 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-03T13:09:11,340 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:09:11,344 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:09:11,347 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:41721 connecting to ZooKeeper ensemble=127.0.0.1:58594
2024-12-03T13:09:11,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:417210x0, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-03T13:09:11,379 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:417210x0, quorum=127.0.0.1:58594, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-03T13:09:11,382 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41721-0x1019c99374d0001 connected
2024-12-03T13:09:11,390 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41721-0x1019c99374d0001, quorum=127.0.0.1:58594, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-03T13:09:11,392 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41721-0x1019c99374d0001, quorum=127.0.0.1:58594, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-03T13:09:11,394 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41721
2024-12-03T13:09:11,395 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41721
2024-12-03T13:09:11,395 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41721
2024-12-03T13:09:11,396 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41721
2024-12-03T13:09:11,396 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41721
2024-12-03T13:09:11,399 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/35946657df09,39675,1733231351048
2024-12-03T13:09:11,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41721-0x1019c99374d0001, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T13:09:11,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T13:09:11,415 DEBUG [M:0;35946657df09:39675 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;35946657df09:39675
2024-12-03T13:09:11,415 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/35946657df09,39675,1733231351048
2024-12-03T13:09:11,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-03T13:09:11,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:09:11,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41721-0x1019c99374d0001, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-03T13:09:11,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41721-0x1019c99374d0001, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:09:11,423 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-03T13:09:11,426 INFO [master/35946657df09:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/35946657df09,39675,1733231351048 from backup master directory
2024-12-03T13:09:11,426 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-03T13:09:11,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41721-0x1019c99374d0001, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T13:09:11,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/35946657df09,39675,1733231351048
2024-12-03T13:09:11,435 WARN [master/35946657df09:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-03T13:09:11,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T13:09:11,435 INFO [master/35946657df09:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=35946657df09,39675,1733231351048
2024-12-03T13:09:11,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34741 is added to blk_1073741826_1002 (size=42)
2024-12-03T13:09:11,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33873 is added to blk_1073741826_1002 (size=42)
2024-12-03T13:09:11,495 DEBUG [master/35946657df09:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/hbase.id with ID: e574063f-1678-4a90-baf9-484fb4ce91f7
2024-12-03T13:09:11,541 INFO [master/35946657df09:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:09:11,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:09:11,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41721-0x1019c99374d0001, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:09:11,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34741 is added to blk_1073741827_1003 (size=196)
2024-12-03T13:09:11,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33873 is added to blk_1073741827_1003 (size=196)
2024-12-03T13:09:11,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:11,741 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:11,996 INFO [master/35946657df09:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-03T13:09:11,997 INFO [master/35946657df09:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-03T13:09:11,997 INFO [master/35946657df09:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-03T13:09:12,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33873 is added to blk_1073741828_1004 (size=1189)
2024-12-03T13:09:12,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34741 is added to blk_1073741828_1004 (size=1189)
2024-12-03T13:09:12,053 INFO [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/MasterData/data/master/store
2024-12-03T13:09:12,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33873 is added to blk_1073741829_1005 (size=34)
2024-12-03T13:09:12,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34741 is added to blk_1073741829_1005 (size=34)
2024-12-03T13:09:12,095 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T13:09:12,095 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-03T13:09:12,095 INFO [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T13:09:12,095 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T13:09:12,095 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-03T13:09:12,095 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T13:09:12,095 INFO [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T13:09:12,095 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-03T13:09:12,096 WARN [master/35946657df09:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/MasterData/data/master/store/.initializing
2024-12-03T13:09:12,097 DEBUG [master/35946657df09:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/MasterData/WALs/35946657df09,39675,1733231351048
2024-12-03T13:09:12,101 INFO [master/35946657df09:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=35946657df09%2C39675%2C1733231351048, suffix=, logDir=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/MasterData/WALs/35946657df09,39675,1733231351048, archiveDir=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/MasterData/oldWALs, maxLogs=10
2024-12-03T13:09:12,102 INFO [master/35946657df09:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C39675%2C1733231351048.1733231352102
2024-12-03T13:09:12,137 INFO [master/35946657df09:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/MasterData/WALs/35946657df09,39675,1733231351048/35946657df09%2C39675%2C1733231351048.1733231352102
2024-12-03T13:09:12,138 DEBUG [master/35946657df09:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40723:40723),(127.0.0.1/127.0.0.1:39439:39439)]
2024-12-03T13:09:12,138 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-12-03T13:09:12,138 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T13:09:12,138 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:09:12,138 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:09:12,140 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:09:12,142 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-12-03T13:09:12,142 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:09:12,143 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-03T13:09:12,143 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:09:12,144 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-12-03T13:09:12,144 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:09:12,147 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-03T13:09:12,147 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:09:12,147 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:12,153 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-12-03T13:09:12,153 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:09:12,154 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-03T13:09:12,155 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:09:12,160 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-12-03T13:09:12,160 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:09:12,163 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-03T13:09:12,171 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:09:12,174 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:09:12,195 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-12-03T13:09:12,203 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:09:12,211 DEBUG [master/35946657df09:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-03T13:09:12,213 INFO [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=779367, jitterRate=-0.008984014391899109}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-12-03T13:09:12,214 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-03T13:09:12,217 INFO [master/35946657df09:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-12-03T13:09:12,231 DEBUG [master/35946657df09:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ce548ae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-03T13:09:12,235 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating...
2024-12-03T13:09:12,235 INFO [master/35946657df09:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-12-03T13:09:12,235 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-12-03T13:09:12,235 INFO [master/35946657df09:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-12-03T13:09:12,240 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 4 msec
2024-12-03T13:09:12,241 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec
2024-12-03T13:09:12,241 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-12-03T13:09:12,256 INFO [master/35946657df09:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
2024-12-03T13:09:12,264 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-12-03T13:09:12,307 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false
2024-12-03T13:09:12,307 INFO [master/35946657df09:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-12-03T13:09:12,311 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-12-03T13:09:12,320 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false
2024-12-03T13:09:12,321 INFO [master/35946657df09:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-12-03T13:09:12,322 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-12-03T13:09:12,329 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false
2024-12-03T13:09:12,330 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-12-03T13:09:12,337 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false
2024-12-03T13:09:12,355 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-12-03T13:09:12,371 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-12-03T13:09:12,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-03T13:09:12,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:09:12,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41721-0x1019c99374d0001, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-03T13:09:12,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41721-0x1019c99374d0001, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:09:12,395 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=35946657df09,39675,1733231351048, sessionid=0x1019c99374d0000, setting cluster-up flag (Was=false)
2024-12-03T13:09:12,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:09:12,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41721-0x1019c99374d0001, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:09:12,462 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-12-03T13:09:12,482 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=35946657df09,39675,1733231351048
2024-12-03T13:09:12,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41721-0x1019c99374d0001, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:09:12,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:09:12,554 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-12-03T13:09:12,563 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=35946657df09,39675,1733231351048
2024-12-03T13:09:12,578 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta
2024-12-03T13:09:12,579 INFO [master/35946657df09:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2
2024-12-03T13:09:12,579 INFO [master/35946657df09:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-12-03T13:09:12,579 DEBUG [master/35946657df09:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 35946657df09,39675,1733231351048 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
2024-12-03T13:09:12,579 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/35946657df09:0, corePoolSize=5, maxPoolSize=5
2024-12-03T13:09:12,579 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/35946657df09:0, corePoolSize=5, maxPoolSize=5
2024-12-03T13:09:12,580 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/35946657df09:0, corePoolSize=5, maxPoolSize=5
2024-12-03T13:09:12,580 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/35946657df09:0, corePoolSize=5, maxPoolSize=5
2024-12-03T13:09:12,580 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/35946657df09:0, corePoolSize=10, maxPoolSize=10
2024-12-03T13:09:12,580 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:09:12,580 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/35946657df09:0, corePoolSize=2, maxPoolSize=2
2024-12-03T13:09:12,580 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:09:12,583 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta
2024-12-03T13:09:12,583 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region
2024-12-03T13:09:12,584 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733231382584
2024-12-03T13:09:12,584 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-12-03T13:09:12,584 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-12-03T13:09:12,584 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-12-03T13:09:12,584 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-12-03T13:09:12,585 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-12-03T13:09:12,585 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-12-03T13:09:12,585 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-03T13:09:12,585 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-12-03T13:09:12,585 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-12-03T13:09:12,585 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-12-03T13:09:12,586 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-12-03T13:09:12,586 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-12-03T13:09:12,586 DEBUG [master/35946657df09:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/35946657df09:0:becomeActiveMaster-HFileCleaner.large.0-1733231352586,5,FailOnTimeoutGroup]
2024-12-03T13:09:12,587 DEBUG [master/35946657df09:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/35946657df09:0:becomeActiveMaster-HFileCleaner.small.0-1733231352586,5,FailOnTimeoutGroup]
2024-12-03T13:09:12,587 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-03T13:09:12,587 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-12-03T13:09:12,587 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-12-03T13:09:12,587 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
2024-12-03T13:09:12,591 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:09:12,591 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-03T13:09:12,625 DEBUG [RS:0;35946657df09:41721 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;35946657df09:41721
2024-12-03T13:09:12,626 INFO [RS:0;35946657df09:41721 {}] regionserver.HRegionServer(1008): ClusterId : e574063f-1678-4a90-baf9-484fb4ce91f7
2024-12-03T13:09:12,627 DEBUG [RS:0;35946657df09:41721 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-12-03T13:09:12,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34741 is added to blk_1073741831_1007 (size=1039)
2024-12-03T13:09:12,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33873 is added to blk_1073741831_1007 (size=1039)
2024-12-03T13:09:12,641 DEBUG [RS:0;35946657df09:41721 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-12-03T13:09:12,641 DEBUG [RS:0;35946657df09:41721 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-12-03T13:09:12,643 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039
2024-12-03T13:09:12,643 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b
2024-12-03T13:09:12,648 DEBUG [RS:0;35946657df09:41721 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-12-03T13:09:12,648 DEBUG [RS:0;35946657df09:41721 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@dcc50f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-03T13:09:12,649 DEBUG [RS:0;35946657df09:41721 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7641f651, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=35946657df09/172.17.0.2:0
2024-12-03T13:09:12,649 INFO [RS:0;35946657df09:41721 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled
2024-12-03T13:09:12,649 INFO [RS:0;35946657df09:41721 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled
2024-12-03T13:09:12,649 DEBUG [RS:0;35946657df09:41721 {}] regionserver.HRegionServer(1090): About to register with Master.
2024-12-03T13:09:12,654 INFO [RS:0;35946657df09:41721 {}] regionserver.HRegionServer(3073): reportForDuty to master=35946657df09,39675,1733231351048 with isa=35946657df09/172.17.0.2:41721, startcode=1733231351325
2024-12-03T13:09:12,654 DEBUG [RS:0;35946657df09:41721 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-03T13:09:12,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33873 is added to blk_1073741832_1008 (size=32)
2024-12-03T13:09:12,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34741 is added to blk_1073741832_1008 (size=32)
2024-12-03T13:09:12,659 INFO [RS-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39545, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService
2024-12-03T13:09:12,660 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39675 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 35946657df09,41721,1733231351325
2024-12-03T13:09:12,660 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39675 {}] master.ServerManager(486): Registering regionserver=35946657df09,41721,1733231351325
2024-12-03T13:09:12,662 DEBUG [RS:0;35946657df09:41721 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b
2024-12-03T13:09:12,662 DEBUG [RS:0;35946657df09:41721 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:39401
2024-12-03T13:09:12,662 DEBUG [RS:0;35946657df09:41721 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1
2024-12-03T13:09:12,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-03T13:09:12,672 DEBUG [RS:0;35946657df09:41721 {}] zookeeper.ZKUtil(111): regionserver:41721-0x1019c99374d0001, quorum=127.0.0.1:58594, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/35946657df09,41721,1733231351325
2024-12-03T13:09:12,672 WARN [RS:0;35946657df09:41721 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-03T13:09:12,672 INFO [RS:0;35946657df09:41721 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-03T13:09:12,672 DEBUG [RS:0;35946657df09:41721 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325
2024-12-03T13:09:12,686 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [35946657df09,41721,1733231351325]
2024-12-03T13:09:12,690 DEBUG [RS:0;35946657df09:41721 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds
2024-12-03T13:09:12,690 INFO [RS:0;35946657df09:41721 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-12-03T13:09:12,695 INFO [RS:0;35946657df09:41721 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-12-03T13:09:12,699 INFO [RS:0;35946657df09:41721 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-03T13:09:12,699 INFO [RS:0;35946657df09:41721 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-03T13:09:12,699 INFO [RS:0;35946657df09:41721 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S
2024-12-03T13:09:12,703 INFO [RS:0;35946657df09:41721 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-12-03T13:09:12,703 DEBUG [RS:0;35946657df09:41721 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:09:12,703 DEBUG [RS:0;35946657df09:41721 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:09:12,703 DEBUG [RS:0;35946657df09:41721 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:09:12,704 DEBUG [RS:0;35946657df09:41721 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:09:12,704 DEBUG [RS:0;35946657df09:41721 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:09:12,704 DEBUG [RS:0;35946657df09:41721 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/35946657df09:0, corePoolSize=2, maxPoolSize=2
2024-12-03T13:09:12,704 DEBUG [RS:0;35946657df09:41721 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:09:12,704 DEBUG [RS:0;35946657df09:41721 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:09:12,704 DEBUG [RS:0;35946657df09:41721 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:09:12,704 DEBUG [RS:0;35946657df09:41721 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:09:12,704 DEBUG [RS:0;35946657df09:41721 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:09:12,704 DEBUG [RS:0;35946657df09:41721 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/35946657df09:0, corePoolSize=3, maxPoolSize=3
2024-12-03T13:09:12,704 DEBUG [RS:0;35946657df09:41721 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/35946657df09:0, corePoolSize=3, maxPoolSize=3
2024-12-03T13:09:12,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:12,717 INFO [RS:0;35946657df09:41721 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-12-03T13:09:12,717 INFO [RS:0;35946657df09:41721 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-12-03T13:09:12,717 INFO [RS:0;35946657df09:41721 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-12-03T13:09:12,717 INFO [RS:0;35946657df09:41721 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-12-03T13:09:12,717 INFO [RS:0;35946657df09:41721 {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,41721,1733231351325-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-03T13:09:12,742 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:12,750 INFO [RS:0;35946657df09:41721 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-12-03T13:09:12,750 INFO [RS:0;35946657df09:41721 {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,41721,1733231351325-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-03T13:09:12,778 INFO [RS:0;35946657df09:41721 {}] regionserver.Replication(204): 35946657df09,41721,1733231351325 started
2024-12-03T13:09:12,779 INFO [RS:0;35946657df09:41721 {}] regionserver.HRegionServer(1767): Serving as 35946657df09,41721,1733231351325, RpcServer on 35946657df09/172.17.0.2:41721, sessionid=0x1019c99374d0001
2024-12-03T13:09:12,779 DEBUG [RS:0;35946657df09:41721 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-12-03T13:09:12,779 DEBUG [RS:0;35946657df09:41721 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 35946657df09,41721,1733231351325
2024-12-03T13:09:12,779 DEBUG [RS:0;35946657df09:41721 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '35946657df09,41721,1733231351325'
2024-12-03T13:09:12,779 DEBUG [RS:0;35946657df09:41721 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-12-03T13:09:12,782 DEBUG [RS:0;35946657df09:41721 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-12-03T13:09:12,783 DEBUG [RS:0;35946657df09:41721 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-12-03T13:09:12,783 DEBUG [RS:0;35946657df09:41721 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-12-03T13:09:12,783 DEBUG [RS:0;35946657df09:41721 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 35946657df09,41721,1733231351325
2024-12-03T13:09:12,783 DEBUG [RS:0;35946657df09:41721 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '35946657df09,41721,1733231351325'
2024-12-03T13:09:12,783 DEBUG [RS:0;35946657df09:41721 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-12-03T13:09:12,784 DEBUG [RS:0;35946657df09:41721 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-03T13:09:12,785 DEBUG [RS:0;35946657df09:41721 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-03T13:09:12,785 INFO [RS:0;35946657df09:41721 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-03T13:09:12,785 INFO [RS:0;35946657df09:41721 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-12-03T13:09:12,887 INFO [RS:0;35946657df09:41721 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=35946657df09%2C41721%2C1733231351325, suffix=, logDir=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325, archiveDir=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/oldWALs, maxLogs=32
2024-12-03T13:09:12,888 INFO [RS:0;35946657df09:41721 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C41721%2C1733231351325.1733231352888
2024-12-03T13:09:12,920 INFO [RS:0;35946657df09:41721 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231352888
2024-12-03T13:09:12,920 DEBUG [RS:0;35946657df09:41721 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39439:39439),(127.0.0.1/127.0.0.1:40723:40723)]
2024-12-03T13:09:13,060 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T13:09:13,068 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-03T13:09:13,072 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-03T13:09:13,072 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:09:13,072 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-03T13:09:13,073 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-03T13:09:13,075 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00
EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T13:09:13,077 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T13:09:13,077 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T13:09:13,078 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T13:09:13,081 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T13:09:13,081 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T13:09:13,083 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T13:09:13,084 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/hbase/meta/1588230740 2024-12-03T13:09:13,085 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/hbase/meta/1588230740 2024-12-03T13:09:13,088 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-03T13:09:13,090 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-03T13:09:13,094 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-03T13:09:13,095 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=883786, jitterRate=0.12379258871078491}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-03T13:09:13,095 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-12-03T13:09:13,095 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-03T13:09:13,095 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-03T13:09:13,095 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-03T13:09:13,095 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-03T13:09:13,095 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-03T13:09:13,096 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-03T13:09:13,096 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-03T13:09:13,101 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta
2024-12-03T13:09:13,101 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta
2024-12-03T13:09:13,101 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-12-03T13:09:13,104 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-03T13:09:13,105 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-12-03T13:09:13,148 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:13,256 DEBUG [35946657df09:39675 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-12-03T13:09:13,256 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=35946657df09,41721,1733231351325
2024-12-03T13:09:13,257 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 35946657df09,41721,1733231351325, state=OPENING
2024-12-03T13:09:13,343 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-12-03T13:09:13,527 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41721-0x1019c99374d0001, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:09:13,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:09:13,528 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=35946657df09,41721,1733231351325}]
2024-12-03T13:09:13,528 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-03T13:09:13,528 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-03T13:09:13,681 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 35946657df09,41721,1733231351325
2024-12-03T13:09:13,682 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-03T13:09:13,684 INFO [RS-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53726, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-03T13:09:13,690 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740
2024-12-03T13:09:13,690 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-03T13:09:13,693 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=35946657df09%2C41721%2C1733231351325.meta, suffix=.meta, logDir=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325, archiveDir=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/oldWALs, maxLogs=32
2024-12-03T13:09:13,695 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C41721%2C1733231351325.meta.1733231353695.meta
2024-12-03T13:09:13,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:13,709 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.meta.1733231353695.meta
2024-12-03T13:09:13,709 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40723:40723),(127.0.0.1/127.0.0.1:39439:39439)]
2024-12-03T13:09:13,710 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-12-03T13:09:13,710 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-12-03T13:09:13,710 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-12-03T13:09:13,710 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
2024-12-03T13:09:13,710 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-12-03T13:09:13,710 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T13:09:13,711 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740
2024-12-03T13:09:13,711 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740
2024-12-03T13:09:13,713 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-03T13:09:13,714 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-03T13:09:13,715 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:09:13,715 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-03T13:09:13,716 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-03T13:09:13,716 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-03T13:09:13,716 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:09:13,717 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-03T13:09:13,717 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-03T13:09:13,718 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-03T13:09:13,718 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:09:13,719 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-03T13:09:13,720 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/hbase/meta/1588230740
2024-12-03T13:09:13,727 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/hbase/meta/1588230740
2024-12-03T13:09:13,732 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-03T13:09:13,739 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-03T13:09:13,743 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:13,744 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=868450, jitterRate=0.10429172217845917}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-03T13:09:13,745 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-12-03T13:09:13,749 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733231353681
2024-12-03T13:09:13,756 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740
2024-12-03T13:09:13,756 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=35946657df09,41721,1733231351325
2024-12-03T13:09:13,756 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740
2024-12-03T13:09:13,757 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 35946657df09,41721,1733231351325, state=OPEN
2024-12-03T13:09:13,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-03T13:09:13,788 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-03T13:09:13,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41721-0x1019c99374d0001, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-03T13:09:13,789 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-03T13:09:13,803 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2
2024-12-03T13:09:13,803 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=35946657df09,41721,1733231351325 in 260 msec
2024-12-03T13:09:13,811 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1
2024-12-03T13:09:13,811 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 702 msec
2024-12-03T13:09:13,817 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.2410 sec
2024-12-03T13:09:13,817 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733231353817, completionTime=-1
2024-12-03T13:09:13,817 INFO [master/35946657df09:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running
2024-12-03T13:09:13,817 DEBUG [master/35946657df09:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster...
2024-12-03T13:09:13,819 DEBUG [hconnection-0x4b82685f-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-03T13:09:13,821 INFO [RS-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53728, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-03T13:09:13,823 INFO [master/35946657df09:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1
2024-12-03T13:09:13,823 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733231413823
2024-12-03T13:09:13,823 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733231473823
2024-12-03T13:09:13,823 INFO [master/35946657df09:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 5 msec
2024-12-03T13:09:13,860 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,39675,1733231351048-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-03T13:09:13,860 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,39675,1733231351048-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-03T13:09:13,860 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,39675,1733231351048-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-03T13:09:13,860 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-35946657df09:39675, period=300000, unit=MILLISECONDS is enabled.
2024-12-03T13:09:13,860 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-12-03T13:09:13,861 INFO [master/35946657df09:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating...
2024-12-03T13:09:13,861 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-03T13:09:13,865 DEBUG [master/35946657df09:0.Chore.1 {}] janitor.CatalogJanitor(179):
2024-12-03T13:09:13,865 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace
2024-12-03T13:09:13,867 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION
2024-12-03T13:09:13,867 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:09:13,868 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-03T13:09:13,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33873 is added to blk_1073741835_1011 (size=358)
2024-12-03T13:09:13,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34741 is added to blk_1073741835_1011 (size=358)
2024-12-03T13:09:13,897 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 34f71b7f7972afc48a6173d0ad8bf3a7, NAME => 'hbase:namespace,,1733231353861.34f71b7f7972afc48a6173d0ad8bf3a7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b
2024-12-03T13:09:13,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33873 is added to blk_1073741836_1012 (size=42)
2024-12-03T13:09:13,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34741 is added to blk_1073741836_1012 (size=42)
2024-12-03T13:09:13,979 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733231353861.34f71b7f7972afc48a6173d0ad8bf3a7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T13:09:13,979 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 34f71b7f7972afc48a6173d0ad8bf3a7, disabling compactions & flushes
2024-12-03T13:09:13,979 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733231353861.34f71b7f7972afc48a6173d0ad8bf3a7.
2024-12-03T13:09:13,979 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733231353861.34f71b7f7972afc48a6173d0ad8bf3a7.
2024-12-03T13:09:13,979 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733231353861.34f71b7f7972afc48a6173d0ad8bf3a7. after waiting 0 ms
2024-12-03T13:09:13,979 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733231353861.34f71b7f7972afc48a6173d0ad8bf3a7.
2024-12-03T13:09:13,979 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733231353861.34f71b7f7972afc48a6173d0ad8bf3a7.
2024-12-03T13:09:13,979 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 34f71b7f7972afc48a6173d0ad8bf3a7:
2024-12-03T13:09:13,983 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META
2024-12-03T13:09:13,984 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733231353861.34f71b7f7972afc48a6173d0ad8bf3a7.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733231353983"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733231353983"}]},"ts":"1733231353983"}
2024-12-03T13:09:13,987 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta.
2024-12-03T13:09:13,989 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-03T13:09:13,989 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733231353989"}]},"ts":"1733231353989"}
2024-12-03T13:09:13,991 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta
2024-12-03T13:09:14,012 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=34f71b7f7972afc48a6173d0ad8bf3a7, ASSIGN}]
2024-12-03T13:09:14,014 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=34f71b7f7972afc48a6173d0ad8bf3a7, ASSIGN
2024-12-03T13:09:14,015 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=34f71b7f7972afc48a6173d0ad8bf3a7, ASSIGN; state=OFFLINE, location=35946657df09,41721,1733231351325; forceNewPlan=false, retain=false
2024-12-03T13:09:14,149 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:14,166 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=34f71b7f7972afc48a6173d0ad8bf3a7, regionState=OPENING, regionLocation=35946657df09,41721,1733231351325
2024-12-03T13:09:14,168 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 34f71b7f7972afc48a6173d0ad8bf3a7, server=35946657df09,41721,1733231351325}]
2024-12-03T13:09:14,321 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 35946657df09,41721,1733231351325
2024-12-03T13:09:14,326 INFO [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733231353861.34f71b7f7972afc48a6173d0ad8bf3a7.
2024-12-03T13:09:14,326 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 34f71b7f7972afc48a6173d0ad8bf3a7, NAME => 'hbase:namespace,,1733231353861.34f71b7f7972afc48a6173d0ad8bf3a7.', STARTKEY => '', ENDKEY => ''}
2024-12-03T13:09:14,326 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 34f71b7f7972afc48a6173d0ad8bf3a7
2024-12-03T13:09:14,326 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733231353861.34f71b7f7972afc48a6173d0ad8bf3a7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T13:09:14,326 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 34f71b7f7972afc48a6173d0ad8bf3a7
2024-12-03T13:09:14,326 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 34f71b7f7972afc48a6173d0ad8bf3a7
2024-12-03T13:09:14,332 INFO [StoreOpener-34f71b7f7972afc48a6173d0ad8bf3a7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 34f71b7f7972afc48a6173d0ad8bf3a7
2024-12-03T13:09:14,336 INFO [StoreOpener-34f71b7f7972afc48a6173d0ad8bf3a7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 34f71b7f7972afc48a6173d0ad8bf3a7 columnFamilyName info
2024-12-03T13:09:14,336 DEBUG [StoreOpener-34f71b7f7972afc48a6173d0ad8bf3a7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:09:14,340 INFO [StoreOpener-34f71b7f7972afc48a6173d0ad8bf3a7-1 {}] regionserver.HStore(327): Store=34f71b7f7972afc48a6173d0ad8bf3a7/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-03T13:09:14,341 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/hbase/namespace/34f71b7f7972afc48a6173d0ad8bf3a7
2024-12-03T13:09:14,342 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/hbase/namespace/34f71b7f7972afc48a6173d0ad8bf3a7
2024-12-03T13:09:14,344 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 34f71b7f7972afc48a6173d0ad8bf3a7
2024-12-03T13:09:14,348 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/hbase/namespace/34f71b7f7972afc48a6173d0ad8bf3a7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-03T13:09:14,350 INFO [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 34f71b7f7972afc48a6173d0ad8bf3a7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=697859, jitterRate=-0.1126270443201065}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-03T13:09:14,351 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 34f71b7f7972afc48a6173d0ad8bf3a7:
2024-12-03T13:09:14,352 INFO [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733231353861.34f71b7f7972afc48a6173d0ad8bf3a7., pid=6, masterSystemTime=1733231354321
2024-12-03T13:09:14,354 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733231353861.34f71b7f7972afc48a6173d0ad8bf3a7.
2024-12-03T13:09:14,354 INFO [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733231353861.34f71b7f7972afc48a6173d0ad8bf3a7.
2024-12-03T13:09:14,355 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=34f71b7f7972afc48a6173d0ad8bf3a7, regionState=OPEN, openSeqNum=2, regionLocation=35946657df09,41721,1733231351325
2024-12-03T13:09:14,360 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5
2024-12-03T13:09:14,361 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 34f71b7f7972afc48a6173d0ad8bf3a7, server=35946657df09,41721,1733231351325 in 189 msec
2024-12-03T13:09:14,363 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4
2024-12-03T13:09:14,363 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=34f71b7f7972afc48a6173d0ad8bf3a7, ASSIGN in 348 msec
2024-12-03T13:09:14,364 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-03T13:09:14,365 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733231354364"}]},"ts":"1733231354364"}
2024-12-03T13:09:14,375 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta
2024-12-03T13:09:14,413 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace
2024-12-03T13:09:14,413 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION
2024-12-03T13:09:14,415 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 552 msec
2024-12-03T13:09:14,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41721-0x1019c99374d0001, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:09:14,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace
2024-12-03T13:09:14,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:09:14,425 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default
2024-12-03T13:09:14,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace
2024-12-03T13:09:14,449 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 24 msec
2024-12-03T13:09:14,457 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase
2024-12-03T13:09:14,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace
2024-12-03T13:09:14,490 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 32 msec
2024-12-03T13:09:14,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default
2024-12-03T13:09:14,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase
2024-12-03T13:09:14,545 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 3.110sec
2024-12-03T13:09:14,545 INFO [master/35946657df09:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-12-03T13:09:14,545 INFO [master/35946657df09:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-12-03T13:09:14,546 INFO [master/35946657df09:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-12-03T13:09:14,546 INFO [master/35946657df09:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-12-03T13:09:14,546 INFO [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-12-03T13:09:14,546 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,39675,1733231351048-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-03T13:09:14,546 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,39675,1733231351048-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-12-03T13:09:14,560 DEBUG [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds
2024-12-03T13:09:14,560 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-12-03T13:09:14,560 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,39675,1733231351048-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-03T13:09:14,610 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7f54b037 to 127.0.0.1:58594 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@30307a6f 2024-12-03T13:09:14,625 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44d31e31, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T13:09:14,630 DEBUG [hconnection-0x7783e7c6-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T13:09:14,635 INFO [RS-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53738, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T13:09:14,639 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=35946657df09,39675,1733231351048 2024-12-03T13:09:14,640 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T13:09:14,648 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-03T13:09:14,648 INFO [Time-limited test {}] wal.TestLogRolling(297): Starting testLogRollOnPipelineRestart 2024-12-03T13:09:14,648 INFO [Time-limited test {}] wal.TestLogRolling(300): Replication=2 2024-12-03T13:09:14,654 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-03T13:09:14,663 INFO [RS-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60714, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-03T13:09:14,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39675 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-03T13:09:14,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39675 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-03T13:09:14,668 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39675 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T13:09:14,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39675 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-12-03T13:09:14,679 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T13:09:14,680 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T13:09:14,680 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39675 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 9 2024-12-03T13:09:14,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39675 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-03T13:09:14,687 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T13:09:14,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:09:14,744 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:09:14,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33873 is added to blk_1073741837_1013 (size=395) 2024-12-03T13:09:14,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34741 is added to blk_1073741837_1013 (size=395) 2024-12-03T13:09:14,756 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => ea6b9829cde6eab149d3a0635d52b20e, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733231354668.ea6b9829cde6eab149d3a0635d52b20e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b 2024-12-03T13:09:14,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34741 is added to blk_1073741838_1014 (size=78) 2024-12-03T13:09:14,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33873 is added to blk_1073741838_1014 (size=78) 2024-12-03T13:09:14,784 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733231354668.ea6b9829cde6eab149d3a0635d52b20e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T13:09:14,785 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1681): Closing ea6b9829cde6eab149d3a0635d52b20e, disabling compactions & flushes 2024-12-03T13:09:14,785 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733231354668.ea6b9829cde6eab149d3a0635d52b20e. 2024-12-03T13:09:14,785 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733231354668.ea6b9829cde6eab149d3a0635d52b20e. 2024-12-03T13:09:14,785 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733231354668.ea6b9829cde6eab149d3a0635d52b20e. after waiting 0 ms 2024-12-03T13:09:14,785 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733231354668.ea6b9829cde6eab149d3a0635d52b20e. 2024-12-03T13:09:14,785 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733231354668.ea6b9829cde6eab149d3a0635d52b20e. 
2024-12-03T13:09:14,785 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1635): Region close journal for ea6b9829cde6eab149d3a0635d52b20e: 2024-12-03T13:09:14,791 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T13:09:14,791 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1733231354668.ea6b9829cde6eab149d3a0635d52b20e.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1733231354791"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733231354791"}]},"ts":"1733231354791"} 2024-12-03T13:09:14,793 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-03T13:09:14,794 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T13:09:14,795 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733231354794"}]},"ts":"1733231354794"} 2024-12-03T13:09:14,796 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-12-03T13:09:14,821 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=ea6b9829cde6eab149d3a0635d52b20e, ASSIGN}] 2024-12-03T13:09:14,823 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=ea6b9829cde6eab149d3a0635d52b20e, ASSIGN 2024-12-03T13:09:14,824 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=ea6b9829cde6eab149d3a0635d52b20e, ASSIGN; state=OFFLINE, location=35946657df09,41721,1733231351325; forceNewPlan=false, retain=false 2024-12-03T13:09:14,975 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=ea6b9829cde6eab149d3a0635d52b20e, regionState=OPENING, regionLocation=35946657df09,41721,1733231351325 2024-12-03T13:09:14,977 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure ea6b9829cde6eab149d3a0635d52b20e, server=35946657df09,41721,1733231351325}] 2024-12-03T13:09:15,129 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 35946657df09,41721,1733231351325 2024-12-03T13:09:15,134 INFO [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRollOnPipelineRestart,,1733231354668.ea6b9829cde6eab149d3a0635d52b20e. 
2024-12-03T13:09:15,134 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => ea6b9829cde6eab149d3a0635d52b20e, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733231354668.ea6b9829cde6eab149d3a0635d52b20e.', STARTKEY => '', ENDKEY => ''} 2024-12-03T13:09:15,135 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart ea6b9829cde6eab149d3a0635d52b20e 2024-12-03T13:09:15,135 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733231354668.ea6b9829cde6eab149d3a0635d52b20e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T13:09:15,135 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for ea6b9829cde6eab149d3a0635d52b20e 2024-12-03T13:09:15,135 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for ea6b9829cde6eab149d3a0635d52b20e 2024-12-03T13:09:15,141 INFO [StoreOpener-ea6b9829cde6eab149d3a0635d52b20e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region ea6b9829cde6eab149d3a0635d52b20e 2024-12-03T13:09:15,147 INFO [StoreOpener-ea6b9829cde6eab149d3a0635d52b20e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ea6b9829cde6eab149d3a0635d52b20e columnFamilyName info 2024-12-03T13:09:15,147 DEBUG [StoreOpener-ea6b9829cde6eab149d3a0635d52b20e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T13:09:15,148 INFO [StoreOpener-ea6b9829cde6eab149d3a0635d52b20e-1 {}] regionserver.HStore(327): Store=ea6b9829cde6eab149d3a0635d52b20e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T13:09:15,150 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/default/TestLogRolling-testLogRollOnPipelineRestart/ea6b9829cde6eab149d3a0635d52b20e 2024-12-03T13:09:15,150 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, 
pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/default/TestLogRolling-testLogRollOnPipelineRestart/ea6b9829cde6eab149d3a0635d52b20e 2024-12-03T13:09:15,150 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:09:15,152 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for ea6b9829cde6eab149d3a0635d52b20e 2024-12-03T13:09:15,157 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/default/TestLogRolling-testLogRollOnPipelineRestart/ea6b9829cde6eab149d3a0635d52b20e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T13:09:15,158 INFO [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened ea6b9829cde6eab149d3a0635d52b20e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=884444, jitterRate=0.124628946185112}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T13:09:15,159 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for ea6b9829cde6eab149d3a0635d52b20e: 2024-12-03T13:09:15,166 INFO [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1733231354668.ea6b9829cde6eab149d3a0635d52b20e., pid=11, masterSystemTime=1733231355129 2024-12-03T13:09:15,172 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1733231354668.ea6b9829cde6eab149d3a0635d52b20e. 2024-12-03T13:09:15,172 INFO [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRollOnPipelineRestart,,1733231354668.ea6b9829cde6eab149d3a0635d52b20e. 
2024-12-03T13:09:15,179 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=ea6b9829cde6eab149d3a0635d52b20e, regionState=OPEN, openSeqNum=2, regionLocation=35946657df09,41721,1733231351325 2024-12-03T13:09:15,202 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-03T13:09:15,203 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure ea6b9829cde6eab149d3a0635d52b20e, server=35946657df09,41721,1733231351325 in 217 msec 2024-12-03T13:09:15,205 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-03T13:09:15,205 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=ea6b9829cde6eab149d3a0635d52b20e, ASSIGN in 381 msec 2024-12-03T13:09:15,205 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T13:09:15,206 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733231355205"}]},"ts":"1733231355205"} 2024-12-03T13:09:15,208 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-12-03T13:09:15,246 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T13:09:15,249 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 579 msec 2024-12-03T13:09:15,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:09:15,745 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:09:16,150 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:09:16,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:09:16,745 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:09:17,151 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:09:17,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:09:17,746 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:09:18,152 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:09:18,690 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-12-03T13:09:18,712 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:09:18,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:09:18,724 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:09:18,724 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:09:18,725 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:09:18,729 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:09:18,729 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:09:18,730 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:09:18,734 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:09:18,747 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-03T13:09:19,153 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:19,251 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-03T13:09:19,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:09:19,269 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:09:19,269 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:09:19,269 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:09:19,270 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:09:19,270 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:09:19,276 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:09:19,278 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:09:19,714 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:19,747 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:20,154 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:20,714 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:20,748 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:21,154 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:21,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:21,749 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:22,155 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:22,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:22,750 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:22,750 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 after 68071ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T13:09:22,813 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart
2024-12-03T13:09:22,813 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer
2024-12-03T13:09:23,156 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:23,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:23,751 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:24,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:24,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39675 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9
2024-12-03T13:09:24,685 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart, procId: 9 completed
2024-12-03T13:09:24,688 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart
2024-12-03T13:09:24,688 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1733231354668.ea6b9829cde6eab149d3a0635d52b20e.
2024-12-03T13:09:24,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:24,752 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:25,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:25,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:25,752 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:26,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:26,707 INFO [Time-limited test {}] wal.TestLogRolling(337): log.getCurrentFileName()): hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231352888
2024-12-03T13:09:26,708 WARN [ResponseProcessor for block BP-882385131-172.17.0.2-1733231348469:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-882385131-172.17.0.2-1733231348469:blk_1073741833_1009
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:09:26,708 WARN [DataStreamer for file /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231352888 block BP-882385131-172.17.0.2-1733231348469:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-882385131-172.17.0.2-1733231348469:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33873,DS-3c6ad230-b071-49f3-ab64-82aefef2e784,DISK], DatanodeInfoWithStorage[127.0.0.1:34741,DS-5c600535-856b-41e9-bebc-e9043dadc7da,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33873,DS-3c6ad230-b071-49f3-ab64-82aefef2e784,DISK]) is bad.
2024-12-03T13:09:26,709 WARN [ResponseProcessor for block BP-882385131-172.17.0.2-1733231348469:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-882385131-172.17.0.2-1733231348469:blk_1073741834_1010
java.io.IOException: Bad response ERROR for BP-882385131-172.17.0.2-1733231348469:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:33873,DS-3c6ad230-b071-49f3-ab64-82aefef2e784,DISK]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:09:26,709 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1273537580_22 at /127.0.0.1:55108 [Receiving block BP-882385131-172.17.0.2-1733231348469:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34741:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55108 dst: /127.0.0.1:34741
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T13:09:26,710 WARN [DataStreamer for file /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.meta.1733231353695.meta block BP-882385131-172.17.0.2-1733231348469:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-882385131-172.17.0.2-1733231348469:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34741,DS-5c600535-856b-41e9-bebc-e9043dadc7da,DISK], DatanodeInfoWithStorage[127.0.0.1:33873,DS-3c6ad230-b071-49f3-ab64-82aefef2e784,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33873,DS-3c6ad230-b071-49f3-ab64-82aefef2e784,DISK]) is bad.
2024-12-03T13:09:26,710 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1273537580_22 at /127.0.0.1:37682 [Receiving block BP-882385131-172.17.0.2-1733231348469:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33873:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37682 dst: /127.0.0.1:33873
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T13:09:26,710 WARN [PacketResponder: BP-882385131-172.17.0.2-1733231348469:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33873] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run():
java.io.IOException: Broken pipe
    at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?]
    at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?]
    at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?]
    at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?]
    at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?]
    at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?]
    at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?]
    at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T13:09:26,711 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1273537580_22 at /127.0.0.1:55124 [Receiving block BP-882385131-172.17.0.2-1733231348469:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34741:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55124 dst: /127.0.0.1:34741
java.net.SocketException: Connection reset
    at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?]
    at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T13:09:26,711 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1273537580_22 at /127.0.0.1:37694 [Receiving block BP-882385131-172.17.0.2-1733231348469:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33873:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37694 dst: /127.0.0.1:33873
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T13:09:26,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:26,723 WARN [ResponseProcessor for block BP-882385131-172.17.0.2-1733231348469:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-882385131-172.17.0.2-1733231348469:blk_1073741830_1006
java.io.IOException: Bad response ERROR for BP-882385131-172.17.0.2-1733231348469:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:33873,DS-3c6ad230-b071-49f3-ab64-82aefef2e784,DISK]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:09:26,723 WARN [DataStreamer for file /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/MasterData/WALs/35946657df09,39675,1733231351048/35946657df09%2C39675%2C1733231351048.1733231352102 block BP-882385131-172.17.0.2-1733231348469:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-882385131-172.17.0.2-1733231348469:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34741,DS-5c600535-856b-41e9-bebc-e9043dadc7da,DISK], DatanodeInfoWithStorage[127.0.0.1:33873,DS-3c6ad230-b071-49f3-ab64-82aefef2e784,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33873,DS-3c6ad230-b071-49f3-ab64-82aefef2e784,DISK]) is bad.
2024-12-03T13:09:26,724 WARN [PacketResponder: BP-882385131-172.17.0.2-1733231348469:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33873] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run():
java.io.IOException: Connection reset by peer
    at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?]
    at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?]
    at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?]
    at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?]
    at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?]
    at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?]
    at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?]
    at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T13:09:26,724 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-268027980_22 at /127.0.0.1:55068 [Receiving block BP-882385131-172.17.0.2-1733231348469:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34741:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55068 dst: /127.0.0.1:34741
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:09:26,730 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-268027980_22 at /127.0.0.1:37648 [Receiving block BP-882385131-172.17.0.2-1733231348469:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33873:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37648 dst: /127.0.0.1:33873 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:09:26,754 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:09:26,795 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@680d18a8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T13:09:26,795 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@69fed8b0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T13:09:26,795 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T13:09:26,795 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@9a6fe07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T13:09:26,796 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5380ff88{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/hadoop.log.dir/,STOPPED} 2024-12-03T13:09:26,819 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T13:09:26,819 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T13:09:26,822 WARN [BP-882385131-172.17.0.2-1733231348469 heartbeating to localhost/127.0.0.1:39401 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T13:09:26,822 WARN [BP-882385131-172.17.0.2-1733231348469 heartbeating to localhost/127.0.0.1:39401 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-882385131-172.17.0.2-1733231348469 (Datanode Uuid 57a0dfc6-4909-418a-97b3-c4a00cee3b9b) service to localhost/127.0.0.1:39401 2024-12-03T13:09:26,827 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/cluster_470490ff-aee2-6649-080a-b130b0aab159/dfs/data/data4/current/BP-882385131-172.17.0.2-1733231348469 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T13:09:26,827 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T13:09:26,826 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/cluster_470490ff-aee2-6649-080a-b130b0aab159/dfs/data/data3/current/BP-882385131-172.17.0.2-1733231348469 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T13:09:26,872 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T13:09:26,884 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T13:09:26,907 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T13:09:26,907 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T13:09:26,907 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T13:09:26,916 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2016752a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/hadoop.log.dir/,AVAILABLE} 2024-12-03T13:09:26,916 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5abd64ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T13:09:27,090 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@17c9def5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/java.io.tmpdir/jetty-localhost-39173-hadoop-hdfs-3_4_1-tests_jar-_-any-18145711693804443030/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T13:09:27,091 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@220b387{HTTP/1.1, (http/1.1)}{localhost:39173} 2024-12-03T13:09:27,091 INFO [Time-limited test {}] server.Server(415): Started @212856ms 2024-12-03T13:09:27,093 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T13:09:27,158 WARN [ResponseProcessor for block BP-882385131-172.17.0.2-1733231348469:blk_1073741830_1017 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-882385131-172.17.0.2-1733231348469:blk_1073741830_1017 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T13:09:27,159 WARN [ResponseProcessor for block BP-882385131-172.17.0.2-1733231348469:blk_1073741834_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-882385131-172.17.0.2-1733231348469:blk_1073741834_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T13:09:27,158 WARN [ResponseProcessor for block BP-882385131-172.17.0.2-1733231348469:blk_1073741833_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-882385131-172.17.0.2-1733231348469:blk_1073741833_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T13:09:27,160 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:09:27,161 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1273537580_22 at /127.0.0.1:38504 [Receiving block BP-882385131-172.17.0.2-1733231348469:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34741:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38504 dst: /127.0.0.1:34741 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:09:27,167 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-268027980_22 at /127.0.0.1:38536 [Receiving block BP-882385131-172.17.0.2-1733231348469:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34741:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38536 dst: /127.0.0.1:34741 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:09:27,170 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1273537580_22 at /127.0.0.1:38520 [Receiving block BP-882385131-172.17.0.2-1733231348469:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34741:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38520 dst: /127.0.0.1:34741 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T13:09:27,184 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@771ac3f9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T13:09:27,185 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@69943f9a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T13:09:27,185 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T13:09:27,185 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@479990dd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T13:09:27,185 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1834f399{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/hadoop.log.dir/,STOPPED} 2024-12-03T13:09:27,225 WARN [BP-882385131-172.17.0.2-1733231348469 heartbeating to localhost/127.0.0.1:39401 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T13:09:27,225 WARN [BP-882385131-172.17.0.2-1733231348469 heartbeating to localhost/127.0.0.1:39401 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-882385131-172.17.0.2-1733231348469 (Datanode Uuid 538fb0ce-d0e0-4ce9-a623-330626998870) service to localhost/127.0.0.1:39401 2024-12-03T13:09:27,226 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-03T13:09:27,226 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T13:09:27,226 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/cluster_470490ff-aee2-6649-080a-b130b0aab159/dfs/data/data1/current/BP-882385131-172.17.0.2-1733231348469 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T13:09:27,227 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T13:09:27,230 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/cluster_470490ff-aee2-6649-080a-b130b0aab159/dfs/data/data2/current/BP-882385131-172.17.0.2-1733231348469 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T13:09:27,266 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T13:09:27,298 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T13:09:27,321 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T13:09:27,321 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T13:09:27,321 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T13:09:27,327 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d6f715d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/hadoop.log.dir/,AVAILABLE} 2024-12-03T13:09:27,327 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@650ffce5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T13:09:27,513 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@43e6e041{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/java.io.tmpdir/jetty-localhost-45739-hadoop-hdfs-3_4_1-tests_jar-_-any-13419908452057993507/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T13:09:27,515 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@465d3541{HTTP/1.1, (http/1.1)}{localhost:45739} 2024-12-03T13:09:27,516 INFO [Time-limited test {}] server.Server(415): Started @213281ms 2024-12-03T13:09:27,517 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T13:09:27,541 WARN [Thread-1111 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T13:09:27,546 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb04e44234fd71a88 with lease ID 0x1f64fc83ac0f8a2: from storage DS-3c6ad230-b071-49f3-ab64-82aefef2e784 node DatanodeRegistration(127.0.0.1:36449, datanodeUuid=57a0dfc6-4909-418a-97b3-c4a00cee3b9b, infoPort=43179, infoSecurePort=0, ipcPort=35915, storageInfo=lv=-57;cid=testClusterID;nsid=1663561927;c=1733231348469), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T13:09:27,546 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb04e44234fd71a88 with lease ID 0x1f64fc83ac0f8a2: from storage DS-6f394419-cde1-4e3e-8693-b7dbb67643d7 node DatanodeRegistration(127.0.0.1:36449, datanodeUuid=57a0dfc6-4909-418a-97b3-c4a00cee3b9b, infoPort=43179, infoSecurePort=0, ipcPort=35915, storageInfo=lv=-57;cid=testClusterID;nsid=1663561927;c=1733231348469), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T13:09:27,722 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:09:27,763 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:09:27,915 WARN [Thread-1142 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T13:09:27,931 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7328b9c07fc2effd with lease ID 0x1f64fc83ac0f8a3: from storage DS-5c600535-856b-41e9-bebc-e9043dadc7da node DatanodeRegistration(127.0.0.1:45021, datanodeUuid=538fb0ce-d0e0-4ce9-a623-330626998870, infoPort=35877, infoSecurePort=0, ipcPort=42319, storageInfo=lv=-57;cid=testClusterID;nsid=1663561927;c=1733231348469), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T13:09:27,931 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7328b9c07fc2effd with lease ID 0x1f64fc83ac0f8a3: from storage DS-4b560d87-e57a-4c6f-87a1-d63072588613 node DatanodeRegistration(127.0.0.1:45021, datanodeUuid=538fb0ce-d0e0-4ce9-a623-330626998870, infoPort=35877, infoSecurePort=0, ipcPort=42319, storageInfo=lv=-57;cid=testClusterID;nsid=1663561927;c=1733231348469), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T13:09:28,160 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:09:28,625 INFO [Time-limited test {}] wal.TestLogRolling(349): Data Nodes restarted 2024-12-03T13:09:28,627 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-12-03T13:09:28,628 WARN [RS:0;35946657df09:41721.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=5, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34741,DS-5c600535-856b-41e9-bebc-e9043dadc7da,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T13:09:28,628 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 35946657df09%2C41721%2C1733231351325:(num 1733231352888) roll requested 2024-12-03T13:09:28,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41721 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=5, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34741,DS-5c600535-856b-41e9-bebc-e9043dadc7da,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T13:09:28,629 INFO [regionserver/35946657df09:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C41721%2C1733231351325.1733231368629 2024-12-03T13:09:28,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41721 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:53738 deadline: 1733231378627, exception=org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=5, requesting roll of WAL 2024-12-03T13:09:28,641 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.TestLogRolling$2(324): preLogRoll: oldFile=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231352888 newFile=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231368629 2024-12-03T13:09:28,641 WARN [regionserver/35946657df09:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=5, requesting roll of WAL 2024-12-03T13:09:28,641 INFO [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231352888 with entries=5, filesize=2.09 KB; new WAL /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231368629 2024-12-03T13:09:28,642 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35877:35877),(127.0.0.1/127.0.0.1:43179:43179)] 2024-12-03T13:09:28,642 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231352888 is not closed yet, will try archiving it next time 2024-12-03T13:09:28,642 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34741,DS-5c600535-856b-41e9-bebc-e9043dadc7da,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T13:09:28,642 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34741,DS-5c600535-856b-41e9-bebc-e9043dadc7da,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:09:28,642 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231352888
2024-12-03T13:09:28,644 WARN [IPC Server handler 3 on default port 39401 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231352888 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1015
2024-12-03T13:09:28,644 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231352888 after 2ms
2024-12-03T13:09:28,723 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:28,763 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:28,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45021 is added to blk_1073741833_1019 (size=2136)
2024-12-03T13:09:29,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:29,724 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:29,764 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:30,162 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:30,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:30,765 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:31,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:31,545 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command
java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1015: GenerationStamp not matched, existing replica is blk_1073741833_1009
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?]
2024-12-03T13:09:31,726 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:31,765 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:32,164 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:32,645 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231352888 after 4003ms
2024-12-03T13:09:32,726 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:32,766 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:33,164 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:33,727 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:33,767 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:34,165 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:34,728 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:34,768 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:35,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:35,729 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:35,769 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:36,167 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:36,730 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:36,769 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:37,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:37,730 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:37,770 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:38,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:38,731 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:09:38,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 after 68571ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
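The WARNs above come from HBase's RecoverLeaseFSUtils, which reflectively calls DistributedFileSystem.recoverLease(...) and then polls DistributedFileSystem.isFileClosed(...) until the NameNode reports the old WAL closed. The InvocationTargetException wrapping "java.io.IOException: Filesystem closed" means the reflective call itself worked but the underlying DFSClient had already been shut down (DFSClient.checkOpen throws once close() has been called on the shared, cached FileSystem instance), so every retry fails identically; that is typical when a mini-cluster from an earlier test phase is torn down while its Close-WAL-Writer thread is still retrying. As a minimal sketch of the recover-then-poll pattern, written directly against the public DistributedFileSystem API instead of through reflection (the path, URI, and timeouts below are illustrative assumptions, not values from this log):

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  /** Ask the NameNode to recover the lease on p, then poll until the file is closed. */
  static boolean recoverLease(DistributedFileSystem dfs, Path p, long timeoutMs)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    // recoverLease() returns true if the file is already closed.
    if (dfs.recoverLease(p)) {
      return true;
    }
    while (System.currentTimeMillis() < deadline) {
      Thread.sleep(1000); // the log above shows retries spaced out on this order
      // This is the call that throws "Filesystem closed" once the DFSClient is shut down.
      if (dfs.isFileClosed(p)) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical target; the WAL paths in the log are test-run specific.
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:42201"), conf);
    System.out.println("file closed: " + recoverLease(dfs, new Path("/tmp/example.wal"), 60_000L));
  }
}

Note that because FileSystem.get caches instances per URI and UGI, closing the cached instance anywhere closes it for every holder; one common isolation tactic in tests is FileSystem.newInstance(uri, conf), which returns an uncached instance.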
2024-12-03T13:09:40,722 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003
2024-12-03T13:09:40,990 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; this may be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details.
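The DEBUG line above is the test utility's FsDatasetAsyncDiskServiceFixer giving up gracefully: it looks up a private threadGroup field by reflection, the field no longer exists on Hadoop 3.4.1, and per HBASE-27595 the NoSuchFieldException is expected and harmless on newer Hadoop. The underlying pattern is reflective access with an explicit version-dependent fallback; a hedged, self-contained sketch (the class and field names here are placeholders, not the actual Hadoop internals):

import java.lang.reflect.Field;

public class ReflectiveFixSketch {
  /** Read a private field if it exists; treat its absence as a non-fatal, version-dependent condition. */
  static Object readPrivateField(Object target, String fieldName) {
    try {
      Field f = target.getClass().getDeclaredField(fieldName);
      f.setAccessible(true);
      return f.get(target);
    } catch (NoSuchFieldException e) {
      // A newer version of the library removed the field: skip the fix-up and log,
      // which is what the fixer does when it emits the DEBUG message above.
      System.err.println("NoSuchFieldException: " + fieldName + "; skipping fix-up");
      return null;
    } catch (IllegalAccessException e) {
      throw new IllegalStateException(e);
    }
  }

  // Stand-in for a class whose internals changed between versions.
  static class Legacy { private int threadCount = 4; }

  public static void main(String[] args) {
    System.out.println(readPrivateField(new Legacy(), "threadCount"));  // present -> 4
    System.out.println(readPrivateField(new Legacy(), "threadGroup"));  // absent -> logged, null
  }
}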
2024-12-03T13:09:42,584 WARN [master/35946657df09:0:becomeActiveMaster.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=95, requesting roll of WAL
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34741,DS-5c600535-856b-41e9-bebc-e9043dadc7da,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:09:42,585 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(197): WAL FSHLog 35946657df09%2C39675%2C1733231351048:(num 1733231352102) roll requested
2024-12-03T13:09:42,585 ERROR [ProcExecTimeout {}] region.RegionProcedureStore(422): Failed to delete pids=[4, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?]
	at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?]
	at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34741,DS-5c600535-856b-41e9-bebc-e9043dadc7da,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:09:42,585 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C39675%2C1733231351048.1733231382585
2024-12-03T13:09:42,585 ERROR [ProcExecTimeout {}] procedure2.TimeoutExecutorThread(124): Ignoring pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner exception: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL
java.io.UncheckedIOException: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL
	at org.apache.hadoop.hbase.procedure2.store.region.RegionProcedureStore.delete(RegionProcedureStore.java:423) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner.periodicExecute(CompletedProcedureCleaner.java:135) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.executeInMemoryChore(TimeoutExecutorThread.java:122) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.execDelayedProcedure(TimeoutExecutorThread.java:101) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.run(TimeoutExecutorThread.java:68) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
Caused by: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?]
	at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?]
	at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34741,DS-5c600535-856b-41e9-bebc-e9043dadc7da,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:09:42,606 WARN [master:store-WAL-Roller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WAL
org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL
2024-12-03T13:09:42,607 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/MasterData/WALs/35946657df09,39675,1733231351048/35946657df09%2C39675%2C1733231351048.1733231352102 with entries=92, filesize=46.00 KB; new WAL /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/MasterData/WALs/35946657df09,39675,1733231351048/35946657df09%2C39675%2C1733231351048.1733231382585
2024-12-03T13:09:42,608 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43179:43179),(127.0.0.1/127.0.0.1:35877:35877)]
2024-12-03T13:09:42,609 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(751): hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/MasterData/WALs/35946657df09,39675,1733231351048/35946657df09%2C39675%2C1733231351048.1733231352102 is not closed yet, will try archiving it next time
2024-12-03T13:09:42,609 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34741,DS-5c600535-856b-41e9-bebc-e9043dadc7da,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:09:42,609 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34741,DS-5c600535-856b-41e9-bebc-e9043dadc7da,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:09:42,609 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/MasterData/WALs/35946657df09,39675,1733231351048/35946657df09%2C39675%2C1733231351048.1733231352102
2024-12-03T13:09:42,611 WARN [IPC Server handler 3 on default port 39401 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/MasterData/WALs/35946657df09,39675,1733231351048/35946657df09%2C39675%2C1733231351048.1733231352102 has not been closed. Lease recovery is in progress. RecoveryId = 1021 for block blk_1073741830_1017
2024-12-03T13:09:42,614 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/MasterData/WALs/35946657df09,39675,1733231351048/35946657df09%2C39675%2C1733231351048.1733231352102 after 5ms
2024-12-03T13:09:42,649 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-03T13:09:42,650 INFO [RS-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53938, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService
2024-12-03T13:09:42,733 WARN [ResponseProcessor for block BP-882385131-172.17.0.2-1733231348469:blk_1073741839_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-882385131-172.17.0.2-1733231348469:blk_1073741839_1018
java.io.EOFException: Unexpected EOF while trying to read response from server
	at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:09:42,733 WARN [DataStreamer for file /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231368629 block BP-882385131-172.17.0.2-1733231348469:blk_1073741839_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-882385131-172.17.0.2-1733231348469:blk_1073741839_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45021,DS-5c600535-856b-41e9-bebc-e9043dadc7da,DISK], DatanodeInfoWithStorage[127.0.0.1:36449,DS-3c6ad230-b071-49f3-ab64-82aefef2e784,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45021,DS-5c600535-856b-41e9-bebc-e9043dadc7da,DISK]) is bad.
2024-12-03T13:09:42,734 WARN [ResponseProcessor for block BP-882385131-172.17.0.2-1733231348469:blk_1073741840_1020 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-882385131-172.17.0.2-1733231348469:blk_1073741840_1020
java.io.IOException: Bad response ERROR for BP-882385131-172.17.0.2-1733231348469:blk_1073741840_1020 from datanode DatanodeInfoWithStorage[127.0.0.1:45021,DS-5c600535-856b-41e9-bebc-e9043dadc7da,DISK]
	at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:09:42,734 WARN [DataStreamer for file /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/MasterData/WALs/35946657df09,39675,1733231351048/35946657df09%2C39675%2C1733231351048.1733231382585 block BP-882385131-172.17.0.2-1733231348469:blk_1073741840_1020 {}] hdfs.DataStreamer(1731): Error Recovery for BP-882385131-172.17.0.2-1733231348469:blk_1073741840_1020 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36449,DS-3c6ad230-b071-49f3-ab64-82aefef2e784,DISK], DatanodeInfoWithStorage[127.0.0.1:45021,DS-5c600535-856b-41e9-bebc-e9043dadc7da,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45021,DS-5c600535-856b-41e9-bebc-e9043dadc7da,DISK]) is bad.
2024-12-03T13:09:42,734 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1273537580_22 at /127.0.0.1:60132 [Receiving block BP-882385131-172.17.0.2-1733231348469:blk_1073741839_1018] {}] datanode.DataXceiver(331): 127.0.0.1:45021:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60132 dst: /127.0.0.1:45021
java.nio.channels.ClosedChannelException: null
	at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
	at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
	at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
	at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T13:09:42,735 WARN [PacketResponder: BP-882385131-172.17.0.2-1733231348469:blk_1073741840_1020, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:45021] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run():
java.io.IOException: Broken pipe
	at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?]
	at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?]
	at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?]
	at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?]
	at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?]
	at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?]
	at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?]
	at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T13:09:42,736 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1273537580_22 at /127.0.0.1:41962 [Receiving block BP-882385131-172.17.0.2-1733231348469:blk_1073741839_1018] {}] datanode.DataXceiver(331): 127.0.0.1:36449:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41962 dst: /127.0.0.1:36449
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T13:09:42,736 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-268027980_22 at /127.0.0.1:58156 [Receiving block BP-882385131-172.17.0.2-1733231348469:blk_1073741840_1020] {}] datanode.DataXceiver(331): 127.0.0.1:36449:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58156 dst: /127.0.0.1:36449
java.net.SocketException: Connection reset
	at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?]
	at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
	at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
	at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T13:09:42,736 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-268027980_22 at /127.0.0.1:45232 [Receiving block BP-882385131-172.17.0.2-1733231348469:blk_1073741840_1020] {}] datanode.DataXceiver(331): 127.0.0.1:45021:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45232 dst: /127.0.0.1:45021
java.nio.channels.ClosedChannelException: null
	at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
	at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
	at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
	at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
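The DataXceiver ERRORs and the "Premature EOF from inputStream" / "Connection reset" / ClosedChannelException traces in this stretch are the datanode-side mirror of the client's pipeline failure: when a datanode in the write pipeline goes away mid-block (the test restarts datanodes here), its peers see the socket close partway through a length-prefixed packet, and Hadoop's IOUtils.readFully surfaces exactly this "Premature EOF". A minimal sketch of that failure mode (a hypothetical helper illustrating the behavior, not the Hadoop source):

import java.io.ByteArrayInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

public class PrematureEofSketch {
  /** Read exactly len bytes, failing the way a mid-packet peer disconnect surfaces. */
  static void readFully(InputStream in, byte[] buf, int off, int len) throws IOException {
    while (len > 0) {
      int n = in.read(buf, off, len);
      if (n < 0) {
        // The peer closed the connection before the full packet arrived.
        throw new EOFException("Premature EOF from inputStream");
      }
      off += n;
      len -= n;
    }
  }

  public static void main(String[] args) throws IOException {
    byte[] half = new byte[] {1, 2, 3}; // pretend the sender died after 3 of 6 bytes
    try {
      readFully(new ByteArrayInputStream(half), new byte[6], 0, 6);
    } catch (EOFException e) {
      System.out.println("caught: " + e.getMessage()); // matches the DataXceiver traces above
    }
  }
}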
11 more 2024-12-03T13:09:42,787 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@43e6e041{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T13:09:42,793 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@465d3541{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T13:09:42,793 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T13:09:42,793 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@650ffce5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T13:09:42,793 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d6f715d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/hadoop.log.dir/,STOPPED} 2024-12-03T13:09:42,816 WARN [BP-882385131-172.17.0.2-1733231348469 heartbeating to localhost/127.0.0.1:39401 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T13:09:42,816 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-03T13:09:42,816 WARN [BP-882385131-172.17.0.2-1733231348469 heartbeating to localhost/127.0.0.1:39401 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-882385131-172.17.0.2-1733231348469 (Datanode Uuid 538fb0ce-d0e0-4ce9-a623-330626998870) service to localhost/127.0.0.1:39401 2024-12-03T13:09:42,816 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T13:09:42,817 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/cluster_470490ff-aee2-6649-080a-b130b0aab159/dfs/data/data1/current/BP-882385131-172.17.0.2-1733231348469 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T13:09:42,817 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/cluster_470490ff-aee2-6649-080a-b130b0aab159/dfs/data/data2/current/BP-882385131-172.17.0.2-1733231348469 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T13:09:42,819 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T13:09:42,851 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-03T13:09:42,866 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-03T13:09:42,874 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-03T13:09:42,874 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-03T13:09:42,874 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-03T13:09:42,876 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30c0f0c0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/hadoop.log.dir/,AVAILABLE}
2024-12-03T13:09:42,877 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3bcb5ee9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-03T13:09:43,008 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@256eaf82{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/java.io.tmpdir/jetty-localhost-37535-hadoop-hdfs-3_4_1-tests_jar-_-any-10292630146811217571/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T13:09:43,008 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@65d74252{HTTP/1.1, (http/1.1)}{localhost:37535}
2024-12-03T13:09:43,009 INFO [Time-limited test {}] server.Server(415): Started @228774ms
2024-12-03T13:09:43,010 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-03T13:09:43,071 WARN [ResponseProcessor for block BP-882385131-172.17.0.2-1733231348469:blk_1073741839_1022 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-882385131-172.17.0.2-1733231348469:blk_1073741839_1022
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:09:43,074 WARN [ResponseProcessor for block BP-882385131-172.17.0.2-1733231348469:blk_1073741840_1023 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-882385131-172.17.0.2-1733231348469:blk_1073741840_1023
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:09:43,079 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-268027980_22 at /127.0.0.1:58164 [Receiving block BP-882385131-172.17.0.2-1733231348469:blk_1073741840_1020] {}] datanode.DataXceiver(331): 127.0.0.1:36449:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58164 dst: /127.0.0.1:36449
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T13:09:43,081 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@17c9def5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T13:09:43,081 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@220b387{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-03T13:09:43,081 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-03T13:09:43,082 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5abd64ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-03T13:09:43,082 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2016752a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/hadoop.log.dir/,STOPPED}
2024-12-03T13:09:43,082 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1273537580_22 at /127.0.0.1:58158 [Receiving block BP-882385131-172.17.0.2-1733231348469:blk_1073741839_1018] {}] datanode.DataXceiver(331): 127.0.0.1:36449:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58158 dst: /127.0.0.1:36449
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T13:09:43,116 WARN [BP-882385131-172.17.0.2-1733231348469 heartbeating to localhost/127.0.0.1:39401 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-03T13:09:43,117 WARN [BP-882385131-172.17.0.2-1733231348469 heartbeating to localhost/127.0.0.1:39401 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-882385131-172.17.0.2-1733231348469 (Datanode Uuid 57a0dfc6-4909-418a-97b3-c4a00cee3b9b) service to localhost/127.0.0.1:39401
2024-12-03T13:09:43,117 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/cluster_470490ff-aee2-6649-080a-b130b0aab159/dfs/data/data3/current/BP-882385131-172.17.0.2-1733231348469 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T13:09:43,118 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-03T13:09:43,118 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-03T13:09:43,118 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/cluster_470490ff-aee2-6649-080a-b130b0aab159/dfs/data/data4/current/BP-882385131-172.17.0.2-1733231348469 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T13:09:43,119 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-03T13:09:43,135 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-03T13:09:43,139 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-03T13:09:43,146 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-03T13:09:43,146 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-03T13:09:43,146 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-03T13:09:43,151 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@666b6bf8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/hadoop.log.dir/,AVAILABLE}
2024-12-03T13:09:43,151 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2a77606d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-03T13:09:43,173 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:43,300 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@74de0b9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/java.io.tmpdir/jetty-localhost-42559-hadoop-hdfs-3_4_1-tests_jar-_-any-13457442242379270190/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T13:09:43,300 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3967a70f{HTTP/1.1, (http/1.1)}{localhost:42559}
2024-12-03T13:09:43,300 INFO [Time-limited test {}] server.Server(415): Started @229065ms
2024-12-03T13:09:43,302 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-03T13:09:43,420 WARN [Thread-1196 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-03T13:09:43,434 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb7242af04fcff5ae with lease ID 0x1f64fc83ac0f8a4: from storage DS-5c600535-856b-41e9-bebc-e9043dadc7da node DatanodeRegistration(127.0.0.1:37167, datanodeUuid=538fb0ce-d0e0-4ce9-a623-330626998870, infoPort=40011, infoSecurePort=0, ipcPort=45611, storageInfo=lv=-57;cid=testClusterID;nsid=1663561927;c=1733231348469), blocks: 8, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-03T13:09:43,434 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb7242af04fcff5ae with lease ID 0x1f64fc83ac0f8a4: from storage DS-4b560d87-e57a-4c6f-87a1-d63072588613 node DatanodeRegistration(127.0.0.1:37167, datanodeUuid=538fb0ce-d0e0-4ce9-a623-330626998870, infoPort=40011, infoSecurePort=0, ipcPort=45611, storageInfo=lv=-57;cid=testClusterID;nsid=1663561927;c=1733231348469), blocks: 8, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-03T13:09:43,718 WARN [Thread-1222 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-03T13:09:43,726 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfb3c5cad3849355e with lease ID 0x1f64fc83ac0f8a5: from storage DS-3c6ad230-b071-49f3-ab64-82aefef2e784 node DatanodeRegistration(127.0.0.1:38709, datanodeUuid=57a0dfc6-4909-418a-97b3-c4a00cee3b9b, infoPort=37715, infoSecurePort=0, ipcPort=39029, storageInfo=lv=-57;cid=testClusterID;nsid=1663561927;c=1733231348469), blocks: 8, hasStaleStorage: false, processing time: 3 msecs, invalidatedBlocks: 0
2024-12-03T13:09:43,729 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfb3c5cad3849355e with lease ID 0x1f64fc83ac0f8a5: from storage DS-6f394419-cde1-4e3e-8693-b7dbb67643d7 node DatanodeRegistration(127.0.0.1:38709, datanodeUuid=57a0dfc6-4909-418a-97b3-c4a00cee3b9b, infoPort=37715, infoSecurePort=0, ipcPort=39029, storageInfo=lv=-57;cid=testClusterID;nsid=1663561927;c=1733231348469), blocks: 8, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-03T13:09:43,738 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:43,776 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:44,174 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:44,419 INFO [Time-limited test {}] wal.TestLogRolling(366): Data Nodes restarted
2024-12-03T13:09:44,421 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004
2024-12-03T13:09:44,422 WARN [RS:0;35946657df09:41721.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=8, requesting roll of WAL
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36449,DS-3c6ad230-b071-49f3-ab64-82aefef2e784,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:09:44,422 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 35946657df09%2C41721%2C1733231351325:(num 1733231368629) roll requested
2024-12-03T13:09:44,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41721 {}] ipc.MetricsHBaseServer(152): Unknown exception type
org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=8, requesting roll of WAL
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?]
    at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?]
    at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36449,DS-3c6ad230-b071-49f3-ab64-82aefef2e784,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:09:44,423 INFO [regionserver/35946657df09:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C41721%2C1733231351325.1733231384423
2024-12-03T13:09:44,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41721 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:53738 deadline: 1733231394422, exception=org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=8, requesting roll of WAL
2024-12-03T13:09:44,462 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.TestLogRolling$2(324): preLogRoll: oldFile=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231368629 newFile=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231384423
2024-12-03T13:09:44,462 WARN [regionserver/35946657df09:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WAL
org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=8, requesting roll of WAL
2024-12-03T13:09:44,463 INFO [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231368629 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231384423
2024-12-03T13:09:44,463 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37715:37715),(127.0.0.1/127.0.0.1:40011:40011)]
2024-12-03T13:09:44,463 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231368629 is not closed yet, will try archiving it next time
2024-12-03T13:09:44,463 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36449,DS-3c6ad230-b071-49f3-ab64-82aefef2e784,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:09:44,463 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36449,DS-3c6ad230-b071-49f3-ab64-82aefef2e784,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:09:44,463 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231368629
2024-12-03T13:09:44,464 WARN [IPC Server handler 0 on default port 39401 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231368629 has not been closed. Lease recovery is in progress. RecoveryId = 1025 for block blk_1073741839_1022
2024-12-03T13:09:44,470 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231368629 after 7ms
2024-12-03T13:09:44,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:44,777 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:45,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:45,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:45,777 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:46,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:46,614 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/MasterData/WALs/35946657df09,39675,1733231351048/35946657df09%2C39675%2C1733231351048.1733231352102 after 4005ms
2024-12-03T13:09:46,724 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command
java.io.IOException: Failed to delete 1 (out of 2) replica(s): 0) Failed to delete replica blk_1073741830_1017: GenerationStamp not matched, existing replica is blk_1073741830_1006
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?]
2024-12-03T13:09:46,741 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:46,778 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:47,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:47,741 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:47,779 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:48,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:48,471 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231368629 after 4008ms
2024-12-03T13:09:48,742 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:48,780 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:49,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:49,431 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command
java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741839_1022: GenerationStamp not matched, existing replica is blk_1073741839_1018
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?]
2024-12-03T13:09:49,743 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:49,780 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:50,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:50,743 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:50,781 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:51,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:51,744 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:51,782 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:52,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:52,745 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:52,782 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:53,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:53,746 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:53,783 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:54,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:54,746 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:54,784 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:55,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:55,455 INFO [master/35946657df09:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-03T13:09:55,455 INFO [master/35946657df09:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-12-03T13:09:55,747 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:55,784 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:56,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:09:56,441 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C41721%2C1733231351325.1733231396441 2024-12-03T13:09:56,469 DEBUG [Time-limited test {}] wal.TestLogRolling$2(324): preLogRoll: oldFile=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231384423 newFile=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231396441 2024-12-03T13:09:56,473 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231384423 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231396441 2024-12-03T13:09:56,473 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37715:37715),(127.0.0.1/127.0.0.1:40011:40011)] 2024-12-03T13:09:56,473 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231384423 is not closed yet, will try archiving it next time 2024-12-03T13:09:56,473 DEBUG [Time-limited test {}] wal.TestLogRolling(380): recovering lease for hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231352888 2024-12-03T13:09:56,474 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231352888 2024-12-03T13:09:56,476 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231352888 after 2ms 2024-12-03T13:09:56,476 DEBUG [Time-limited test {}] wal.TestLogRolling(384): Reading WAL /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231352888 2024-12-03T13:09:56,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38709 is added to blk_1073741841_1024 (size=1264) 2024-12-03T13:09:56,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37167 is added to blk_1073741841_1024 (size=1264) 2024-12-03T13:09:56,490 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1733231354351/Put/vlen=162/seqid=0] 2024-12-03T13:09:56,490 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #4: [default/info:d/1733231354430/Put/vlen=9/seqid=0] 2024-12-03T13:09:56,490 
DEBUG [Time-limited test {}] wal.TestLogRolling(389): #5: [hbase/info:d/1733231354468/Put/vlen=7/seqid=0] 2024-12-03T13:09:56,490 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1733231355159/Put/vlen=218/seqid=0] 2024-12-03T13:09:56,490 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #4: [row1002/info:/1733231364701/Put/vlen=1045/seqid=0] 2024-12-03T13:09:56,490 DEBUG [Time-limited test {}] wal.TestLogRolling(396): EOF reading file /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231352888 2024-12-03T13:09:56,490 DEBUG [Time-limited test {}] wal.TestLogRolling(380): recovering lease for hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231368629 2024-12-03T13:09:56,490 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231368629 2024-12-03T13:09:56,492 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231368629 after 2ms 2024-12-03T13:09:56,492 DEBUG [Time-limited test {}] wal.TestLogRolling(384): Reading WAL /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231368629 2024-12-03T13:09:56,517 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #6: [row1003/info:/1733231378718/Put/vlen=1045/seqid=0] 2024-12-03T13:09:56,518 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #7: [row1004/info:/1733231380723/Put/vlen=1045/seqid=0] 2024-12-03T13:09:56,518 DEBUG [Time-limited test {}] wal.TestLogRolling(396): EOF reading file /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231368629 2024-12-03T13:09:56,518 DEBUG [Time-limited test {}] wal.TestLogRolling(380): recovering lease for hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231384423 2024-12-03T13:09:56,518 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231384423 2024-12-03T13:09:56,519 WARN [IPC Server handler 3 on default port 39401 {}] namenode.FSNamesystem(3730): BLOCK* internalReleaseLease: All existing blocks are COMPLETE, lease removed, file /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231384423 closed. 
2024-12-03T13:09:56,521 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231384423 after 3ms
2024-12-03T13:09:56,521 DEBUG [Time-limited test {}] wal.TestLogRolling(384): Reading WAL /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231384423
2024-12-03T13:09:56,532 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #9: [row1005/info:/1733231394439/Put/vlen=1045/seqid=0]
2024-12-03T13:09:56,532 DEBUG [Time-limited test {}] wal.TestLogRolling(380): recovering lease for hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231396441
2024-12-03T13:09:56,532 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231396441
2024-12-03T13:09:56,532 WARN [IPC Server handler 1 on default port 39401 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231396441 has not been closed. Lease recovery is in progress. RecoveryId = 1027 for block blk_1073741842_1026
2024-12-03T13:09:56,534 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231396441 after 2ms
2024-12-03T13:09:56,748 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:56,785 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:57,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:57,444 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-268027980_22 at /127.0.0.1:54000 [Receiving block BP-882385131-172.17.0.2-1733231348469:blk_1073741842_1026] {}] datanode.DataXceiver(331): 127.0.0.1:37167:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54000 dst: /127.0.0.1:37167
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T13:09:57,444 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-268027980_22 at /127.0.0.1:41670 [Receiving block BP-882385131-172.17.0.2-1733231348469:blk_1073741842_1026] {}] datanode.DataXceiver(331): 127.0.0.1:38709:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41670 dst: /127.0.0.1:38709
java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:38709 remote=/127.0.0.1:41670]. Total timeout mills is 60000, 59015 millis timeout left.
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T13:09:57,447 WARN [ResponseProcessor for block BP-882385131-172.17.0.2-1733231348469:blk_1073741842_1026 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-882385131-172.17.0.2-1733231348469:blk_1073741842_1026
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:09:57,447 WARN [DataStreamer for file /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231396441 block BP-882385131-172.17.0.2-1733231348469:blk_1073741842_1026 {}] hdfs.DataStreamer(1731): Error Recovery for BP-882385131-172.17.0.2-1733231348469:blk_1073741842_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38709,DS-3c6ad230-b071-49f3-ab64-82aefef2e784,DISK], DatanodeInfoWithStorage[127.0.0.1:37167,DS-5c600535-856b-41e9-bebc-e9043dadc7da,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38709,DS-3c6ad230-b071-49f3-ab64-82aefef2e784,DISK]) is bad.
2024-12-03T13:09:57,452 WARN [DataStreamer for file /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231396441 block BP-882385131-172.17.0.2-1733231348469:blk_1073741842_1026 {}] hdfs.DataStreamer(859): DataStreamer Exception
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-882385131-172.17.0.2-1733231348469:blk_1073741842_1026 is UNDER_RECOVERY but not UNDER_CONSTRUCTION
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:09:57,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38709 is added to blk_1073741842_1027 (size=85)
2024-12-03T13:09:57,749 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:57,786 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:58,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:58,749 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:58,786 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:59,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:59,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta after 68111ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor187.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T13:09:59,750 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:09:59,787 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:10:00,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:10:00,535 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231396441 after 4003ms
2024-12-03T13:10:00,535 DEBUG [Time-limited test {}] wal.TestLogRolling(384): Reading WAL /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231396441
2024-12-03T13:10:00,540 DEBUG [Time-limited test {}] wal.TestLogRolling(396): EOF reading file /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231396441
2024-12-03T13:10:00,540 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.90 KB heapSize=5.42 KB
2024-12-03T13:10:00,541 WARN [RS_OPEN_META-regionserver/35946657df09:0-0.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=15, requesting roll of WAL
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34741,DS-5c600535-856b-41e9-bebc-e9043dadc7da,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:10:00,541 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 35946657df09%2C41721%2C1733231351325.meta:.meta(num 1733231353695) roll requested
2024-12-03T13:10:00,541 INFO [regionserver/35946657df09:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C41721%2C1733231351325.meta.1733231400541.meta
2024-12-03T13:10:00,541 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 1588230740:
2024-12-03T13:10:00,542 INFO [Time-limited test {}] wal.TestLogRolling(416): org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL
org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?]
    at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?]
    at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34741,DS-5c600535-856b-41e9-bebc-e9043dadc7da,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:10:00,542 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 34f71b7f7972afc48a6173d0ad8bf3a7 1/1 column families, dataSize=78 B heapSize=488 B
2024-12-03T13:10:00,542 WARN [RS:0;35946657df09:41721.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=7, requesting roll of WAL
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-882385131-172.17.0.2-1733231348469:blk_1073741842_1026 is UNDER_RECOVERY but not UNDER_CONSTRUCTION
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:10:00,543 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 34f71b7f7972afc48a6173d0ad8bf3a7: 2024-12-03T13:10:00,543 INFO [Time-limited test {}] wal.TestLogRolling(416): org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=7, requesting roll of WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=7, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-882385131-172.17.0.2-1733231348469:blk_1073741842_1026 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T13:10:00,543 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing ea6b9829cde6eab149d3a0635d52b20e 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-12-03T13:10:00,547 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for ea6b9829cde6eab149d3a0635d52b20e: 2024-12-03T13:10:00,547 INFO [Time-limited test {}] wal.TestLogRolling(416): org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=7, requesting roll of WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=7, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-882385131-172.17.0.2-1733231348469:blk_1073741842_1026 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T13:10:00,557 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-03T13:10:00,557 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-03T13:10:00,559 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7f54b037 to 127.0.0.1:58594 2024-12-03T13:10:00,559 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T13:10:00,559 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-03T13:10:00,559 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2087175266, stopped=false 2024-12-03T13:10:00,559 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=35946657df09,39675,1733231351048 2024-12-03T13:10:00,574 WARN [regionserver/35946657df09:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL 2024-12-03T13:10:00,575 INFO [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.meta.1733231353695.meta with entries=11, filesize=3.66 KB; new WAL /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.meta.1733231400541.meta 2024-12-03T13:10:00,578 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40011:40011),(127.0.0.1/127.0.0.1:37715:37715)] 2024-12-03T13:10:00,579 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.meta.1733231353695.meta is not closed yet, will try archiving it next time 
2024-12-03T13:10:00,579 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog 35946657df09%2C41721%2C1733231351325:(num 1733231396441) roll requested 2024-12-03T13:10:00,579 INFO [regionserver/35946657df09:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C41721%2C1733231351325.1733231400579 2024-12-03T13:10:00,583 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34741,DS-5c600535-856b-41e9-bebc-e9043dadc7da,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T13:10:00,583 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34741,DS-5c600535-856b-41e9-bebc-e9043dadc7da,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T13:10:00,583 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.meta.1733231353695.meta 2024-12-03T13:10:00,587 WARN [IPC Server handler 3 on default port 39401 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.meta.1733231353695.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1029 for block blk_1073741834_1016 2024-12-03T13:10:00,590 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.meta.1733231353695.meta after 7ms 2024-12-03T13:10:00,615 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.TestLogRolling$2(324): preLogRoll: oldFile=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231396441 newFile=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231400579 2024-12-03T13:10:00,618 WARN [regionserver/35946657df09:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=7, requesting roll of WAL 2024-12-03T13:10:00,619 INFO [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231396441 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231400579 2024-12-03T13:10:00,619 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40011:40011),(127.0.0.1/127.0.0.1:37715:37715)] 2024-12-03T13:10:00,619 DEBUG [regionserver/35946657df09:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231396441 is not closed yet, will try archiving it next time 2024-12-03T13:10:00,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T13:10:00,619 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-882385131-172.17.0.2-1733231348469:blk_1073741842_1026 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T13:10:00,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T13:10:00,620 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-03T13:10:00,620 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T13:10:00,620 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '35946657df09,41721,1733231351325' ***** 2024-12-03T13:10:00,620 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-03T13:10:00,620 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-882385131-172.17.0.2-1733231348469:blk_1073741842_1026 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T13:10:00,620 INFO [RS:0;35946657df09:41721 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T13:10:00,620 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231396441 2024-12-03T13:10:00,620 INFO [RS:0;35946657df09:41721 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T13:10:00,620 INFO [RS:0;35946657df09:41721 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T13:10:00,620 INFO [RS:0;35946657df09:41721 {}] regionserver.HRegionServer(3579): Received CLOSE for 34f71b7f7972afc48a6173d0ad8bf3a7 2024-12-03T13:10:00,620 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-03T13:10:00,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41721-0x1019c99374d0001, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T13:10:00,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41721-0x1019c99374d0001, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T13:10:00,620 INFO [RS:0;35946657df09:41721 {}] regionserver.HRegionServer(3579): Received CLOSE for ea6b9829cde6eab149d3a0635d52b20e 2024-12-03T13:10:00,621 INFO [RS:0;35946657df09:41721 {}] regionserver.HRegionServer(1224): stopping server 35946657df09,41721,1733231351325 2024-12-03T13:10:00,621 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231396441 after 1ms 2024-12-03T13:10:00,621 DEBUG [RS:0;35946657df09:41721 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T13:10:00,621 INFO [RS:0;35946657df09:41721 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T13:10:00,621 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 34f71b7f7972afc48a6173d0ad8bf3a7, disabling compactions & flushes 2024-12-03T13:10:00,621 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733231353861.34f71b7f7972afc48a6173d0ad8bf3a7. 2024-12-03T13:10:00,621 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733231353861.34f71b7f7972afc48a6173d0ad8bf3a7. 
2024-12-03T13:10:00,621 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733231353861.34f71b7f7972afc48a6173d0ad8bf3a7. after waiting 0 ms 2024-12-03T13:10:00,621 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733231353861.34f71b7f7972afc48a6173d0ad8bf3a7. 2024-12-03T13:10:00,621 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 34f71b7f7972afc48a6173d0ad8bf3a7 1/1 column families, dataSize=78 B heapSize=728 B 2024-12-03T13:10:00,621 WARN [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultMemStore(92): Snapshot called again without clearing previous. Doing nothing. Another ongoing flush or did we fail last attempt? 2024-12-03T13:10:00,622 INFO [RS:0;35946657df09:41721 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T13:10:00,622 INFO [RS:0;35946657df09:41721 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-03T13:10:00,622 INFO [RS:0;35946657df09:41721 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-03T13:10:00,627 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.1733231396441 to hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/oldWALs/35946657df09%2C41721%2C1733231351325.1733231396441 2024-12-03T13:10:00,627 INFO [RS:0;35946657df09:41721 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close 2024-12-03T13:10:00,627 DEBUG [RS:0;35946657df09:41721 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, 34f71b7f7972afc48a6173d0ad8bf3a7=hbase:namespace,,1733231353861.34f71b7f7972afc48a6173d0ad8bf3a7., ea6b9829cde6eab149d3a0635d52b20e=TestLogRolling-testLogRollOnPipelineRestart,,1733231354668.ea6b9829cde6eab149d3a0635d52b20e.} 2024-12-03T13:10:00,627 DEBUG [RS:0;35946657df09:41721 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 34f71b7f7972afc48a6173d0ad8bf3a7, ea6b9829cde6eab149d3a0635d52b20e 2024-12-03T13:10:00,627 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-03T13:10:00,628 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-03T13:10:00,628 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-03T13:10:00,628 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T13:10:00,628 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T13:10:00,628 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Set watcher on 
znode that does not yet exist, /hbase/running 2024-12-03T13:10:00,628 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.90 KB heapSize=5.89 KB 2024-12-03T13:10:00,628 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41721-0x1019c99374d0001, quorum=127.0.0.1:58594, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T13:10:00,628 WARN [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultMemStore(92): Snapshot called again without clearing previous. Doing nothing. Another ongoing flush or did we fail last attempt? 2024-12-03T13:10:00,628 WARN [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultMemStore(92): Snapshot called again without clearing previous. Doing nothing. Another ongoing flush or did we fail last attempt? 2024-12-03T13:10:00,659 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/hbase/namespace/34f71b7f7972afc48a6173d0ad8bf3a7/.tmp/info/f4b8c04c8c7b45afbe5cc7224a152807 is 45, key is default/info:d/1733231354430/Put/seqid=0 2024-12-03T13:10:00,679 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/hbase/meta/1588230740/.tmp/info/71262594511645d5b94c6063b2de8fde is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1733231354668.ea6b9829cde6eab149d3a0635d52b20e./info:regioninfo/1733231355178/Put/seqid=0 2024-12-03T13:10:00,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38709 is added to blk_1073741845_1031 (size=5037) 2024-12-03T13:10:00,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37167 is added to blk_1073741845_1031 (size=5037) 2024-12-03T13:10:00,715 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/hbase/namespace/34f71b7f7972afc48a6173d0ad8bf3a7/.tmp/info/f4b8c04c8c7b45afbe5cc7224a152807 2024-12-03T13:10:00,721 INFO [regionserver/35946657df09:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T13:10:00,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38709 is added to blk_1073741846_1032 (size=8268) 2024-12-03T13:10:00,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37167 is added to blk_1073741846_1032 (size=8268) 2024-12-03T13:10:00,746 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.66 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/hbase/meta/1588230740/.tmp/info/71262594511645d5b94c6063b2de8fde 2024-12-03T13:10:00,746 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/hbase/namespace/34f71b7f7972afc48a6173d0ad8bf3a7/.tmp/info/f4b8c04c8c7b45afbe5cc7224a152807 as hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/hbase/namespace/34f71b7f7972afc48a6173d0ad8bf3a7/info/f4b8c04c8c7b45afbe5cc7224a152807 2024-12-03T13:10:00,752 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:10:00,763 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/hbase/namespace/34f71b7f7972afc48a6173d0ad8bf3a7/info/f4b8c04c8c7b45afbe5cc7224a152807, entries=2, sequenceid=8, filesize=4.9 K 2024-12-03T13:10:00,774 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 34f71b7f7972afc48a6173d0ad8bf3a7 in 152ms, sequenceid=8, compaction requested=false 2024-12-03T13:10:00,788 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:10:00,793 INFO [regionserver/35946657df09:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-03T13:10:00,793 INFO [regionserver/35946657df09:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-03T13:10:00,812 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/hbase/meta/1588230740/.tmp/table/fa67b9e4ef11457d867c739d0ae417f5 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1733231355205/Put/seqid=0 2024-12-03T13:10:00,828 DEBUG [RS:0;35946657df09:41721 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 34f71b7f7972afc48a6173d0ad8bf3a7, ea6b9829cde6eab149d3a0635d52b20e 2024-12-03T13:10:00,836 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/hbase/namespace/34f71b7f7972afc48a6173d0ad8bf3a7/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-12-03T13:10:00,837 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733231353861.34f71b7f7972afc48a6173d0ad8bf3a7. 2024-12-03T13:10:00,837 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 34f71b7f7972afc48a6173d0ad8bf3a7: 2024-12-03T13:10:00,837 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733231353861.34f71b7f7972afc48a6173d0ad8bf3a7. 2024-12-03T13:10:00,838 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing ea6b9829cde6eab149d3a0635d52b20e, disabling compactions & flushes 2024-12-03T13:10:00,838 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733231354668.ea6b9829cde6eab149d3a0635d52b20e. 2024-12-03T13:10:00,838 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733231354668.ea6b9829cde6eab149d3a0635d52b20e. 2024-12-03T13:10:00,838 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733231354668.ea6b9829cde6eab149d3a0635d52b20e. after waiting 0 ms 2024-12-03T13:10:00,838 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733231354668.ea6b9829cde6eab149d3a0635d52b20e. 2024-12-03T13:10:00,838 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing ea6b9829cde6eab149d3a0635d52b20e 1/1 column families, dataSize=4.20 KB heapSize=4.98 KB 2024-12-03T13:10:00,838 WARN [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultMemStore(92): Snapshot called again without clearing previous. Doing nothing. 
Another ongoing flush or did we fail last attempt? 2024-12-03T13:10:00,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38709 is added to blk_1073741847_1033 (size=5482) 2024-12-03T13:10:00,844 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=244 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/hbase/meta/1588230740/.tmp/table/fa67b9e4ef11457d867c739d0ae417f5 2024-12-03T13:10:00,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37167 is added to blk_1073741847_1033 (size=5482) 2024-12-03T13:10:00,851 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/hbase/meta/1588230740/.tmp/info/71262594511645d5b94c6063b2de8fde as hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/hbase/meta/1588230740/info/71262594511645d5b94c6063b2de8fde 2024-12-03T13:10:00,858 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/hbase/meta/1588230740/info/71262594511645d5b94c6063b2de8fde, entries=20, sequenceid=16, filesize=8.1 K 2024-12-03T13:10:00,859 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/hbase/meta/1588230740/.tmp/table/fa67b9e4ef11457d867c739d0ae417f5 as hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/hbase/meta/1588230740/table/fa67b9e4ef11457d867c739d0ae417f5 2024-12-03T13:10:00,863 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/default/TestLogRolling-testLogRollOnPipelineRestart/ea6b9829cde6eab149d3a0635d52b20e/.tmp/info/9fcaf603f1d840cab9c43d4ad504a892 is 1080, key is row1002/info:/1733231364701/Put/seqid=0 2024-12-03T13:10:00,866 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/hbase/meta/1588230740/table/fa67b9e4ef11457d867c739d0ae417f5, entries=4, sequenceid=16, filesize=5.4 K 2024-12-03T13:10:00,867 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~2.90 KB/2972, heapSize ~5.14 KB/5264, currentSize=0 B/0 for 1588230740 in 239ms, sequenceid=16, compaction requested=false 2024-12-03T13:10:00,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38709 is added to blk_1073741848_1034 (size=9270) 2024-12-03T13:10:00,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37167 is added to blk_1073741848_1034 (size=9270) 2024-12-03T13:10:00,872 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/default/TestLogRolling-testLogRollOnPipelineRestart/ea6b9829cde6eab149d3a0635d52b20e/.tmp/info/9fcaf603f1d840cab9c43d4ad504a892 2024-12-03T13:10:00,879 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/default/TestLogRolling-testLogRollOnPipelineRestart/ea6b9829cde6eab149d3a0635d52b20e/.tmp/info/9fcaf603f1d840cab9c43d4ad504a892 as hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/default/TestLogRolling-testLogRollOnPipelineRestart/ea6b9829cde6eab149d3a0635d52b20e/info/9fcaf603f1d840cab9c43d4ad504a892 2024-12-03T13:10:00,881 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/hbase/meta/1588230740/recovered.edits/19.seqid, newMaxSeqId=19, maxSeqId=1 2024-12-03T13:10:00,882 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T13:10:00,882 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-03T13:10:00,882 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-03T13:10:00,882 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-03T13:10:00,903 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/default/TestLogRolling-testLogRollOnPipelineRestart/ea6b9829cde6eab149d3a0635d52b20e/info/9fcaf603f1d840cab9c43d4ad504a892, entries=4, sequenceid=12, filesize=9.1 K 2024-12-03T13:10:00,913 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for ea6b9829cde6eab149d3a0635d52b20e in 75ms, sequenceid=12, compaction requested=false 2024-12-03T13:10:00,951 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/data/default/TestLogRolling-testLogRollOnPipelineRestart/ea6b9829cde6eab149d3a0635d52b20e/recovered.edits/15.seqid, newMaxSeqId=15, maxSeqId=1 2024-12-03T13:10:00,956 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733231354668.ea6b9829cde6eab149d3a0635d52b20e. 
2024-12-03T13:10:00,956 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for ea6b9829cde6eab149d3a0635d52b20e: 2024-12-03T13:10:00,957 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733231354668.ea6b9829cde6eab149d3a0635d52b20e. 2024-12-03T13:10:01,028 INFO [RS:0;35946657df09:41721 {}] regionserver.HRegionServer(1250): stopping server 35946657df09,41721,1733231351325; all regions closed. 2024-12-03T13:10:01,030 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325 2024-12-03T13:10:01,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38709 is added to blk_1073741843_1028 (size=761) 2024-12-03T13:10:01,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37167 is added to blk_1073741843_1028 (size=761) 2024-12-03T13:10:01,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:10:01,753 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:01,788 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:02,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:02,753 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:02,789 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:02,813 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-03T13:10:03,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:03,754 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:03,790 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:04,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:10:04,591 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.meta.1733231353695.meta after 4008ms 2024-12-03T13:10:04,592 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325/35946657df09%2C41721%2C1733231351325.meta.1733231353695.meta to hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/oldWALs/35946657df09%2C41721%2C1733231351325.meta.1733231353695.meta 2024-12-03T13:10:04,595 DEBUG [RS:0;35946657df09:41721 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/oldWALs 2024-12-03T13:10:04,595 INFO [RS:0;35946657df09:41721 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 35946657df09%2C41721%2C1733231351325.meta:.meta(num 1733231400541) 2024-12-03T13:10:04,595 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/WALs/35946657df09,41721,1733231351325 2024-12-03T13:10:04,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38709 is added to blk_1073741844_1030 (size=1979) 2024-12-03T13:10:04,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37167 is added to blk_1073741844_1030 (size=1979) 2024-12-03T13:10:04,603 DEBUG [RS:0;35946657df09:41721 {}] wal.AbstractFSWAL(1071): Moved 4 WAL file(s) to /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/oldWALs 2024-12-03T13:10:04,604 INFO [RS:0;35946657df09:41721 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 35946657df09%2C41721%2C1733231351325:(num 1733231400579) 2024-12-03T13:10:04,604 DEBUG [RS:0;35946657df09:41721 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T13:10:04,604 INFO [RS:0;35946657df09:41721 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T13:10:04,604 INFO [RS:0;35946657df09:41721 {}] hbase.ChoreService(370): Chore service for: regionserver/35946657df09:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-03T13:10:04,604 INFO [regionserver/35946657df09:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-03T13:10:04,604 INFO [RS:0;35946657df09:41721 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:41721 2024-12-03T13:10:04,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T13:10:04,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41721-0x1019c99374d0001, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/35946657df09,41721,1733231351325 2024-12-03T13:10:04,652 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [35946657df09,41721,1733231351325] 2024-12-03T13:10:04,652 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 35946657df09,41721,1733231351325; numProcessing=1 2024-12-03T13:10:04,660 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/35946657df09,41721,1733231351325 already deleted, retry=false 2024-12-03T13:10:04,660 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 35946657df09,41721,1733231351325 expired; onlineServers=0 2024-12-03T13:10:04,660 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '35946657df09,39675,1733231351048' ***** 2024-12-03T13:10:04,660 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-03T13:10:04,660 DEBUG [M:0;35946657df09:39675 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ac84faf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=35946657df09/172.17.0.2:0 2024-12-03T13:10:04,660 INFO [M:0;35946657df09:39675 {}] regionserver.HRegionServer(1224): stopping server 35946657df09,39675,1733231351048 2024-12-03T13:10:04,660 INFO [M:0;35946657df09:39675 {}] regionserver.HRegionServer(1250): stopping server 35946657df09,39675,1733231351048; all regions closed. 2024-12-03T13:10:04,660 DEBUG [M:0;35946657df09:39675 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T13:10:04,661 DEBUG [M:0;35946657df09:39675 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-03T13:10:04,661 DEBUG [M:0;35946657df09:39675 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-03T13:10:04,661 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-03T13:10:04,661 DEBUG [master/35946657df09:0:becomeActiveMaster-HFileCleaner.large.0-1733231352586 {}] cleaner.HFileCleaner(306): Exit Thread[master/35946657df09:0:becomeActiveMaster-HFileCleaner.large.0-1733231352586,5,FailOnTimeoutGroup] 2024-12-03T13:10:04,661 DEBUG [master/35946657df09:0:becomeActiveMaster-HFileCleaner.small.0-1733231352586 {}] cleaner.HFileCleaner(306): Exit Thread[master/35946657df09:0:becomeActiveMaster-HFileCleaner.small.0-1733231352586,5,FailOnTimeoutGroup] 2024-12-03T13:10:04,661 INFO [M:0;35946657df09:39675 {}] hbase.ChoreService(370): Chore service for: master/35946657df09:0 had [] on shutdown 2024-12-03T13:10:04,661 DEBUG [M:0;35946657df09:39675 {}] master.HMaster(1733): Stopping service threads 2024-12-03T13:10:04,661 INFO [M:0;35946657df09:39675 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-03T13:10:04,661 INFO [M:0;35946657df09:39675 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-03T13:10:04,661 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-03T13:10:04,668 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-03T13:10:04,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T13:10:04,669 DEBUG [M:0;35946657df09:39675 {}] zookeeper.ZKUtil(347): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-03T13:10:04,669 WARN [M:0;35946657df09:39675 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-03T13:10:04,669 INFO [M:0;35946657df09:39675 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-03T13:10:04,669 INFO [M:0;35946657df09:39675 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-03T13:10:04,669 DEBUG [M:0;35946657df09:39675 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T13:10:04,669 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T13:10:04,669 INFO [M:0;35946657df09:39675 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T13:10:04,669 DEBUG [M:0;35946657df09:39675 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T13:10:04,669 DEBUG [M:0;35946657df09:39675 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T13:10:04,669 DEBUG [M:0;35946657df09:39675 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-03T13:10:04,669 INFO [M:0;35946657df09:39675 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=40.10 KB heapSize=49.26 KB 2024-12-03T13:10:04,670 WARN [master/35946657df09:0:becomeActiveMaster.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=97, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36449,DS-3c6ad230-b071-49f3-ab64-82aefef2e784,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T13:10:04,670 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(197): WAL FSHLog 35946657df09%2C39675%2C1733231351048:(num 1733231382585) roll requested 2024-12-03T13:10:04,670 DEBUG [M:0;35946657df09:39675 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-03T13:10:04,670 WARN [M:0;35946657df09:39675 {}] region.MasterRegion(134): Failed to close region org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=97, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36449,DS-3c6ad230-b071-49f3-ab64-82aefef2e784,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T13:10:04,670 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C39675%2C1733231351048.1733231404670 2024-12-03T13:10:04,719 WARN [master:store-WAL-Roller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=97, requesting roll of WAL 2024-12-03T13:10:04,719 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/MasterData/WALs/35946657df09,39675,1733231351048/35946657df09%2C39675%2C1733231351048.1733231382585 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/MasterData/WALs/35946657df09,39675,1733231351048/35946657df09%2C39675%2C1733231351048.1733231404670 2024-12-03T13:10:04,719 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40011:40011),(127.0.0.1/127.0.0.1:37715:37715)] 2024-12-03T13:10:04,719 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(751): hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/MasterData/WALs/35946657df09,39675,1733231351048/35946657df09%2C39675%2C1733231351048.1733231382585 is not closed yet, will try archiving it next time 2024-12-03T13:10:04,719 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36449,DS-3c6ad230-b071-49f3-ab64-82aefef2e784,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T13:10:04,719 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/MasterData/WALs/35946657df09,39675,1733231351048 2024-12-03T13:10:04,719 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36449,DS-3c6ad230-b071-49f3-ab64-82aefef2e784,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-03T13:10:04,720 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/MasterData/WALs/35946657df09,39675,1733231351048/35946657df09%2C39675%2C1733231351048.1733231382585 2024-12-03T13:10:04,720 WARN [IPC Server handler 1 on default port 39401 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/MasterData/WALs/35946657df09,39675,1733231351048/35946657df09%2C39675%2C1733231351048.1733231382585 has not been closed. Lease recovery is in progress. RecoveryId = 1036 for block blk_1073741840_1023 2024-12-03T13:10:04,722 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/MasterData/WALs/35946657df09,39675,1733231351048/35946657df09%2C39675%2C1733231351048.1733231382585 after 2ms 2024-12-03T13:10:04,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38709 is added to blk_1073741849_1035 (size=93) 2024-12-03T13:10:04,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37167 is added to blk_1073741849_1035 (size=93) 2024-12-03T13:10:04,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41721-0x1019c99374d0001, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T13:10:04,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41721-0x1019c99374d0001, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T13:10:04,752 INFO [RS:0;35946657df09:41721 {}] regionserver.HRegionServer(1307): Exiting; stopping=35946657df09,41721,1733231351325; zookeeper connection closed. 2024-12-03T13:10:04,754 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7ff3a31b {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7ff3a31b 2024-12-03T13:10:04,756 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-03T13:10:04,758 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:04,791 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-03T13:10:05,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:10:05,723 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command
java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1016: GenerationStamp not matched, existing replica is blk_1073741834_1010
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?]
2024-12-03T13:10:05,759 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:10:05,791 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:10:05,856 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:10:05,856 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:10:05,856 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:10:05,857 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:10:05,857 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:10:05,857 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:10:05,862 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:10:05,862 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:10:05,862 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:10:05,869 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:10:05,879 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:10:05,880 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:10:06,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:10:06,384 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-03T13:10:06,385 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:10:06,385 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:10:06,408 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:10:06,409 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:10:06,409 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:10:06,409 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:10:06,410 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:10:06,410 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:10:06,415 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:10:06,415 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:10:06,416 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:10:06,418 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:10:06,759 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:10:06,792 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:10:07,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:10:07,432 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command
java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741840_1023: GenerationStamp not matched, existing replica is blk_1073741840_1020
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?]
2024-12-03T13:10:07,760 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:10:07,793 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:10:08,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:10:08,723 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/MasterData/WALs/35946657df09,39675,1733231351048/35946657df09%2C39675%2C1733231351048.1733231382585 after 4003ms
2024-12-03T13:10:08,724 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/MasterData/WALs/35946657df09,39675,1733231351048/35946657df09%2C39675%2C1733231351048.1733231382585 to hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/MasterData/oldWALs/35946657df09%2C39675%2C1733231351048.1733231382585
2024-12-03T13:10:08,729 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/MasterData/oldWALs/35946657df09%2C39675%2C1733231351048.1733231382585 to hdfs://localhost:39401/user/jenkins/test-data/141f6866-1002-2c79-38bb-b212d0d03c1b/oldWALs/35946657df09%2C39675%2C1733231351048.1733231382585$masterlocalwal$
2024-12-03T13:10:08,729 INFO [M:0;35946657df09:39675 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
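Every "Failed invocation for <WAL path>" warning above has the same shape: RecoverLeaseFSUtils looks up DistributedFileSystem#isFileClosed by reflection and polls it while waiting for a WAL file to close, and once the test's mini-cluster has already shut the DFSClient down, each probe throws IOException("Filesystem closed"), which reflection wraps in the InvocationTargetException logged here; the Close-WAL-Writer-0 thread then just retries (in this log, roughly once a second). A minimal sketch of that reflective probe pattern, assuming a DistributedFileSystem handle; the class and method names below are illustrative stand-ins, not the exact HBase internals:

```java
import java.lang.reflect.Method;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Sketch of the reflective isFileClosed probe behind the
// "Failed invocation for <path>" warnings: the method is looked up by
// name (it is not available on every Hadoop version), and any failure
// inside the target call -- here IOException("Filesystem closed") --
// surfaces as an InvocationTargetException that gets logged and is
// treated as "not closed yet", so the lease-recovery loop retries.
final class IsFileClosedProbe { // illustrative name, not HBase's class

  static Method findIsFileClosedMethod(DistributedFileSystem dfs) {
    try {
      return dfs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      return null; // older Hadoop: caller falls back to plain recoverLease polling
    }
  }

  static boolean isFileClosed(DistributedFileSystem dfs, Method m, Path p) {
    try {
      // Throws InvocationTargetException wrapping the IOException once
      // the underlying DFSClient has been stopped, as in the log above.
      return (Boolean) m.invoke(dfs, p);
    } catch (Exception e) {
      // Corresponds to the WARN "Failed invocation for ..." entries;
      // the answer "false" keeps the close-writer thread polling.
      return false;
    }
  }
}
```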
2024-12-03T13:10:08,729 INFO [M:0;35946657df09:39675 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:39675
2024-12-03T13:10:08,730 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-03T13:10:08,760 DEBUG [M:0;35946657df09:39675 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/35946657df09,39675,1733231351048 already deleted, retry=false
2024-12-03T13:10:08,761 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:10:08,793 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:10:08,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T13:10:08,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39675-0x1019c99374d0000, quorum=127.0.0.1:58594, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T13:10:08,877 INFO [M:0;35946657df09:39675 {}] regionserver.HRegionServer(1307): Exiting; stopping=35946657df09,39675,1733231351048; zookeeper connection closed.
2024-12-03T13:10:08,919 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@74de0b9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T13:10:08,920 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3967a70f{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-03T13:10:08,920 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-03T13:10:08,920 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2a77606d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-03T13:10:08,920 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@666b6bf8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/hadoop.log.dir/,STOPPED}
2024-12-03T13:10:08,958 WARN [BP-882385131-172.17.0.2-1733231348469 heartbeating to localhost/127.0.0.1:39401 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-03T13:10:08,958 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-03T13:10:08,958 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-03T13:10:08,958 WARN [BP-882385131-172.17.0.2-1733231348469 heartbeating to localhost/127.0.0.1:39401 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-882385131-172.17.0.2-1733231348469 (Datanode Uuid 57a0dfc6-4909-418a-97b3-c4a00cee3b9b) service to localhost/127.0.0.1:39401
2024-12-03T13:10:08,960 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/cluster_470490ff-aee2-6649-080a-b130b0aab159/dfs/data/data3/current/BP-882385131-172.17.0.2-1733231348469 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T13:10:08,960 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/cluster_470490ff-aee2-6649-080a-b130b0aab159/dfs/data/data4/current/BP-882385131-172.17.0.2-1733231348469 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T13:10:08,960 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-03T13:10:09,003 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@256eaf82{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T13:10:09,004 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@65d74252{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-03T13:10:09,004 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-03T13:10:09,004 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3bcb5ee9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-03T13:10:09,004 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30c0f0c0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/hadoop.log.dir/,STOPPED}
2024-12-03T13:10:09,034 WARN [BP-882385131-172.17.0.2-1733231348469 heartbeating to localhost/127.0.0.1:39401 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-03T13:10:09,034 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
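The "Thread Interrupted waiting to refresh disk information: sleep interrupted" warnings come from Hadoop's fs.CachingGetSpaceUsed$RefreshThread: each DataNode data directory gets a background thread that caches a disk-usage figure and re-measures it on an interval, and DataNode teardown delivers an interrupt that lands in that sleep. A rough, simplified analogue of the pattern; only the CachingGetSpaceUsed$RefreshThread name is taken from the log, and the class below is invented for illustration:

```java
// Simplified analogue of a cached disk-usage refresher: keep a cached
// usage figure, re-measure it periodically in a background loop, and
// treat an interrupt as the shutdown signal -- the moment at which the
// real RefreshThread logs the warning seen above.
final class SpaceUsageRefresher implements Runnable { // illustrative name
  private final long refreshIntervalMs;
  private volatile long cachedUsedBytes;
  private volatile boolean running = true;

  SpaceUsageRefresher(long refreshIntervalMs) {
    this.refreshIntervalMs = refreshIntervalMs;
  }

  @Override
  public void run() {
    while (running) {
      try {
        Thread.sleep(refreshIntervalMs);  // interrupted here during teardown
        cachedUsedBytes = measureUsed();  // e.g. walk the data directory
      } catch (InterruptedException e) {
        // corresponds to the WARN in the log; exit the refresh loop
        Thread.currentThread().interrupt();
        running = false;
      }
    }
  }

  private long measureUsed() { return 0L; } // stand-in for the real scan

  long getUsed() { return cachedUsedBytes; } // readers see the cached value
}
```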
2024-12-03T13:10:09,034 WARN [BP-882385131-172.17.0.2-1733231348469 heartbeating to localhost/127.0.0.1:39401 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-882385131-172.17.0.2-1733231348469 (Datanode Uuid 538fb0ce-d0e0-4ce9-a623-330626998870) service to localhost/127.0.0.1:39401 2024-12-03T13:10:09,034 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T13:10:09,034 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/cluster_470490ff-aee2-6649-080a-b130b0aab159/dfs/data/data1/current/BP-882385131-172.17.0.2-1733231348469 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T13:10:09,037 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T13:10:09,038 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/cluster_470490ff-aee2-6649-080a-b130b0aab159/dfs/data/data2/current/BP-882385131-172.17.0.2-1733231348469 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T13:10:09,064 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@645d76fa{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T13:10:09,095 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@66f02920{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T13:10:09,095 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T13:10:09,095 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@423321b0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T13:10:09,095 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@505e56c6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/hadoop.log.dir/,STOPPED} 2024-12-03T13:10:09,108 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-03T13:10:09,172 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-03T13:10:09,184 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=98 (was 86) Potentially hanging thread: RS-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-27-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (709188109) connection to localhost/127.0.0.1:39401 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-26-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39401 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-26-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39401 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:39401 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (709188109) connection to localhost/127.0.0.1:39401 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS-EventLoopGroup-9-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-27-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (709188109) connection to localhost/127.0.0.1:39401 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-26-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-29-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-27-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-28-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39401 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-29-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39401 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-28-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-29-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-28-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=440 (was 410) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1282 (was 1011) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=1622 (was 253) - AvailableMemoryMB LEAK? - 2024-12-03T13:10:09,193 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=98, OpenFileDescriptor=440, MaxFileDescriptor=1048576, SystemLoadAverage=1282, ProcessCount=11, AvailableMemoryMB=1622 2024-12-03T13:10:09,193 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-03T13:10:09,193 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/hadoop.log.dir so I do NOT create it in target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0 2024-12-03T13:10:09,193 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/hadoop.tmp.dir so I do NOT create it in target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0 2024-12-03T13:10:09,193 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/cluster_330f5210-7045-2e63-0bf6-7a8bbaa8a263, deleteOnExit=true 2024-12-03T13:10:09,193 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-03T13:10:09,194 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/test.cache.data in system properties and HBase conf 2024-12-03T13:10:09,194 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/hadoop.tmp.dir in system properties and HBase conf 2024-12-03T13:10:09,194 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/hadoop.log.dir in system properties and HBase conf 2024-12-03T13:10:09,194 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to 
2024-12-03T13:10:09,193 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=98, OpenFileDescriptor=440, MaxFileDescriptor=1048576, SystemLoadAverage=1282, ProcessCount=11, AvailableMemoryMB=1622
2024-12-03T13:10:09,193 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-03T13:10:09,193 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/hadoop.log.dir so I do NOT create it in target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0
2024-12-03T13:10:09,193 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/9a1d5d92-63aa-495f-ebfd-e8726b537de0/hadoop.tmp.dir so I do NOT create it in target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0
2024-12-03T13:10:09,193 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/cluster_330f5210-7045-2e63-0bf6-7a8bbaa8a263, deleteOnExit=true
2024-12-03T13:10:09,193 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS
2024-12-03T13:10:09,194 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/test.cache.data in system properties and HBase conf
2024-12-03T13:10:09,194 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/hadoop.tmp.dir in system properties and HBase conf
2024-12-03T13:10:09,194 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/hadoop.log.dir in system properties and HBase conf
2024-12-03T13:10:09,194 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-03T13:10:09,194 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-03T13:10:09,194 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF
2024-12-03T13:10:09,194 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-03T13:10:09,195 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-03T13:10:09,195 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-03T13:10:09,195 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-03T13:10:09,195 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-03T13:10:09,195 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-03T13:10:09,195 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-03T13:10:09,195 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-03T13:10:09,195 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-03T13:10:09,195 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-03T13:10:09,195 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/nfs.dump.dir in system properties and HBase conf
2024-12-03T13:10:09,196 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/java.io.tmpdir in system properties and HBase conf
2024-12-03T13:10:09,196 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-03T13:10:09,196 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-03T13:10:09,196 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
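The StartMiniClusterOption line above is the test-side configuration that drives this whole startup sequence. A sketch of what that test code looks like follows; the builder method names are the HBase 2.x API as best I recall it, so treat this as an approximation and verify against the HBase version actually in use:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;

// Rough sketch of the code behind the "Starting up minicluster" line:
// one master, one region server, two datanodes, one ZooKeeper server.
public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option); // emits the DFS/ZK/master startup lines seen in this log
    try {
      // ... exercise the cluster via util.getConnection() ...
    } finally {
      util.shutdownMiniCluster();
    }
  }
}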
2024-12-03T13:10:09,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
  at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
  at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
  at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
  at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
  at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
  ... 11 more
2024-12-03T13:10:09,216 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-03T13:10:09,501 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-03T13:10:09,510 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-03T13:10:09,536 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-03T13:10:09,537 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-03T13:10:09,537 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-03T13:10:09,538 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-03T13:10:09,538 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2cc7e5f7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/hadoop.log.dir/,AVAILABLE}
2024-12-03T13:10:09,539 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7923c3ec{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-03T13:10:09,687 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@411b910a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/java.io.tmpdir/jetty-localhost-38443-hadoop-hdfs-3_4_1-tests_jar-_-any-487014136880001716/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-03T13:10:09,707 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@49f18b4d{HTTP/1.1, (http/1.1)}{localhost:38443}
2024-12-03T13:10:09,707 INFO [Time-limited test {}] server.Server(415): Started @255472ms
2024-12-03T13:10:09,724 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
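The jetty-9.4.53 lines above are the mini DFS cluster bringing up its embedded web UIs (NameNode on localhost:38443 here). A minimal embedded Jetty 9 server of the same shape, a handler wired to a connector on an ephemeral port, looks like the generic sketch below; this is plain Jetty, not Hadoop's HttpServer2 wrapper:

import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.eclipse.jetty.server.Request;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.server.handler.AbstractHandler;

// Generic embedded-Jetty sketch (Jetty 9 API), mirroring the
// ServerConnector/ContextHandler startup lines in the log.
public class EmbeddedJettySketch {
  public static void main(String[] args) throws Exception {
    Server server = new Server();
    ServerConnector connector = new ServerConnector(server);
    connector.setPort(0); // ephemeral port, like the test's localhost:38443
    server.addConnector(connector);
    server.setHandler(new AbstractHandler() {
      @Override
      public void handle(String target, Request baseRequest, HttpServletRequest request,
          HttpServletResponse response) throws java.io.IOException {
        response.setContentType("text/plain");
        response.getWriter().println("ok");
        baseRequest.setHandled(true);
      }
    });
    server.start();
    System.out.println("Started on port " + connector.getLocalPort());
    server.stop(); // a real server would join() instead
  }
}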
2024-12-03T13:10:09,761 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
  at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
  at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
  at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
  at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
  at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
  ... 11 more
2024-12-03T13:10:09,794 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
  at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
  at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
  at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
  at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
  at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
  ... 11 more
2024-12-03T13:10:10,043 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-03T13:10:10,062 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-03T13:10:10,083 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-03T13:10:10,086 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-03T13:10:10,087 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-03T13:10:10,095 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32c045d1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/hadoop.log.dir/,AVAILABLE}
2024-12-03T13:10:10,095 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5f2e43ed{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-03T13:10:10,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
  at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
  at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
  at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
  at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
  at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
  ... 11 more
2024-12-03T13:10:10,270 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@336a0528{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/java.io.tmpdir/jetty-localhost-34027-hadoop-hdfs-3_4_1-tests_jar-_-any-10150774217507675072/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T13:10:10,271 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5161bbfc{HTTP/1.1, (http/1.1)}{localhost:34027}
2024-12-03T13:10:10,272 INFO [Time-limited test {}] server.Server(415): Started @256036ms
2024-12-03T13:10:10,275 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-03T13:10:10,400 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
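Each "Failed invocation ... InvocationTargetException: null" block above is the signature of a reflective call: RecoverLeaseFSUtils invokes isFileClosed through Method.invoke, so the real failure (here "java.io.IOException: Filesystem closed", because the previous mini cluster's DFSClient has already been shut down) arrives wrapped in an InvocationTargetException whose own message is null. A self-contained toy illustration of that wrapping, not the HBase code:

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Toy demonstration of why the log shows an InvocationTargetException
// with a null message and the real IOException attached as the cause.
public class ReflectiveWrapDemo {
  public static class Target {
    public boolean isFileClosed(String path) throws IOException {
      throw new IOException("Filesystem closed"); // stands in for DFSClient.checkOpen
    }
  }

  public static void main(String[] args) throws Exception {
    Method m = Target.class.getMethod("isFileClosed", String.class);
    try {
      m.invoke(new Target(), "/some/wal/file");
    } catch (InvocationTargetException e) {
      // e has no message of its own (log4j renders that as ": null");
      // the underlying failure is the cause.
      System.out.println(e);            // java.lang.reflect.InvocationTargetException
      System.out.println(e.getCause()); // java.io.IOException: Filesystem closed
    }
  }
}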
2024-12-03T13:10:10,418 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-03T13:10:10,432 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-03T13:10:10,432 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-03T13:10:10,433 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-03T13:10:10,451 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@336dd6d5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/hadoop.log.dir/,AVAILABLE}
2024-12-03T13:10:10,452 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@467339ba{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-03T13:10:10,615 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@46b9efff{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/java.io.tmpdir/jetty-localhost-37973-hadoop-hdfs-3_4_1-tests_jar-_-any-2850497497142314626/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T13:10:10,618 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@510b4241{HTTP/1.1, (http/1.1)}{localhost:37973}
2024-12-03T13:10:10,619 INFO [Time-limited test {}] server.Server(415): Started @256384ms
2024-12-03T13:10:10,621 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-03T13:10:10,762 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
  at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
  at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
  at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
  at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
  at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
  ... 11 more
2024-12-03T13:10:10,796 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
  at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
  at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
  at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
  at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
  at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
  ... 11 more
2024-12-03T13:10:11,071 WARN [Thread-1408 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/cluster_330f5210-7045-2e63-0bf6-7a8bbaa8a263/dfs/data/data1/current/BP-2107311685-172.17.0.2-1733231409232/current, will proceed with Du for space computation calculation,
2024-12-03T13:10:11,073 WARN [Thread-1409 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/cluster_330f5210-7045-2e63-0bf6-7a8bbaa8a263/dfs/data/data2/current/BP-2107311685-172.17.0.2-1733231409232/current, will proceed with Du for space computation calculation,
2024-12-03T13:10:11,135 WARN [Thread-1372 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-03T13:10:11,140 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xeec791040e7e23cc with lease ID 0xc08e26cbed24492b: Processing first storage report for DS-b8b2060b-1e31-4c76-a300-05b78356c41a from datanode DatanodeRegistration(127.0.0.1:38179, datanodeUuid=51db223e-2155-43e3-9f5e-e78f6cf13b91, infoPort=33231, infoSecurePort=0, ipcPort=37091, storageInfo=lv=-57;cid=testClusterID;nsid=1097566345;c=1733231409232)
2024-12-03T13:10:11,140 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeec791040e7e23cc with lease ID 0xc08e26cbed24492b: from storage DS-b8b2060b-1e31-4c76-a300-05b78356c41a node DatanodeRegistration(127.0.0.1:38179, datanodeUuid=51db223e-2155-43e3-9f5e-e78f6cf13b91, infoPort=33231, infoSecurePort=0, ipcPort=37091, storageInfo=lv=-57;cid=testClusterID;nsid=1097566345;c=1733231409232), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-03T13:10:11,140 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xeec791040e7e23cc with lease ID 0xc08e26cbed24492b: Processing first storage report for DS-c2e68360-b1b2-4880-8cd4-35fd2c47e725 from datanode DatanodeRegistration(127.0.0.1:38179, datanodeUuid=51db223e-2155-43e3-9f5e-e78f6cf13b91, infoPort=33231, infoSecurePort=0, ipcPort=37091, storageInfo=lv=-57;cid=testClusterID;nsid=1097566345;c=1733231409232)
2024-12-03T13:10:11,140 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeec791040e7e23cc with lease ID 0xc08e26cbed24492b: from storage DS-c2e68360-b1b2-4880-8cd4-35fd2c47e725 node DatanodeRegistration(127.0.0.1:38179, datanodeUuid=51db223e-2155-43e3-9f5e-e78f6cf13b91, infoPort=33231, infoSecurePort=0, ipcPort=37091, storageInfo=lv=-57;cid=testClusterID;nsid=1097566345;c=1733231409232), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-03T13:10:11,201 WARN [Thread-1419 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/cluster_330f5210-7045-2e63-0bf6-7a8bbaa8a263/dfs/data/data3/current/BP-2107311685-172.17.0.2-1733231409232/current, will proceed with Du for space computation calculation,
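"dfsUsed file missing ... will proceed with Du for space computation" above means each datanode could not read its cached usage file and falls back to walking the block directory and summing file sizes, du-style. A generic sketch of that fallback with java.nio (illustrative only, not the BlockPoolSlice implementation):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.stream.Stream;

// Generic "du" fallback: sum the sizes of all regular files under a root.
public class DuFallback {
  static long dirSize(Path root) throws IOException {
    try (Stream<Path> walk = Files.walk(root)) {
      return walk.filter(Files::isRegularFile)
                 .mapToLong(p -> {
                   try {
                     return Files.size(p);
                   } catch (IOException e) {
                     return 0L; // file vanished mid-walk; count it as empty
                   }
                 })
                 .sum();
    }
  }

  public static void main(String[] args) throws IOException {
    System.out.println(dirSize(Path.of(args[0])));
  }
}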
2024-12-03T13:10:11,204 WARN [Thread-1420 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/cluster_330f5210-7045-2e63-0bf6-7a8bbaa8a263/dfs/data/data4/current/BP-2107311685-172.17.0.2-1733231409232/current, will proceed with Du for space computation calculation,
2024-12-03T13:10:11,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
  at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
  at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
  at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
  at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
  at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
  ... 11 more
2024-12-03T13:10:11,265 WARN [Thread-1395 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-03T13:10:11,276 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe0932ac2b5e0b86a with lease ID 0xc08e26cbed24492c: Processing first storage report for DS-0296b262-e1f4-49b7-a0e4-6f1f2e716e5b from datanode DatanodeRegistration(127.0.0.1:42247, datanodeUuid=41434f0e-79f5-49a5-8139-eb7eb23899c2, infoPort=42113, infoSecurePort=0, ipcPort=45133, storageInfo=lv=-57;cid=testClusterID;nsid=1097566345;c=1733231409232)
2024-12-03T13:10:11,276 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe0932ac2b5e0b86a with lease ID 0xc08e26cbed24492c: from storage DS-0296b262-e1f4-49b7-a0e4-6f1f2e716e5b node DatanodeRegistration(127.0.0.1:42247, datanodeUuid=41434f0e-79f5-49a5-8139-eb7eb23899c2, infoPort=42113, infoSecurePort=0, ipcPort=45133, storageInfo=lv=-57;cid=testClusterID;nsid=1097566345;c=1733231409232), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-03T13:10:11,276 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe0932ac2b5e0b86a with lease ID 0xc08e26cbed24492c: Processing first storage report for DS-53ff8763-38cf-463a-8528-bd1336df9634 from datanode DatanodeRegistration(127.0.0.1:42247, datanodeUuid=41434f0e-79f5-49a5-8139-eb7eb23899c2, infoPort=42113, infoSecurePort=0, ipcPort=45133, storageInfo=lv=-57;cid=testClusterID;nsid=1097566345;c=1733231409232)
2024-12-03T13:10:11,276 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe0932ac2b5e0b86a with lease ID 0xc08e26cbed24492c: from storage DS-53ff8763-38cf-463a-8528-bd1336df9634 node DatanodeRegistration(127.0.0.1:42247, datanodeUuid=41434f0e-79f5-49a5-8139-eb7eb23899c2, infoPort=42113, infoSecurePort=0, ipcPort=45133, storageInfo=lv=-57;cid=testClusterID;nsid=1097566345;c=1733231409232), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-03T13:10:11,347 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0
2024-12-03T13:10:11,383 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/cluster_330f5210-7045-2e63-0bf6-7a8bbaa8a263/zookeeper_0, clientPort=63466, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/cluster_330f5210-7045-2e63-0bf6-7a8bbaa8a263/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/cluster_330f5210-7045-2e63-0bf6-7a8bbaa8a263/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-03T13:10:11,388 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=63466
2024-12-03T13:10:11,388 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:10:11,390 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:10:11,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38179 is added to blk_1073741825_1001 (size=7)
2024-12-03T13:10:11,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42247 is added to blk_1073741825_1001 (size=7)
2024-12-03T13:10:11,487 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819 with version=8
2024-12-03T13:10:11,487 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/hbase-staging
2024-12-03T13:10:11,490 INFO [Time-limited test {}] client.ConnectionUtils(129): master/35946657df09:0 server-side Connection retries=45
2024-12-03T13:10:11,490 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-03T13:10:11,490 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-03T13:10:11,491 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-03T13:10:11,491 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-03T13:10:11,491 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
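The RpcExecutor lines above describe the server's call queues: default.FPBQ.Fifo, for example, is one LinkedBlockingQueue of capacity 30 drained by 3 handler threads. The shape of that arrangement, reduced to plain java.util.concurrent (an illustration of the pattern, not the HBase class):

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

// A bounded FIFO call queue (maxQueueLength=30) drained by a fixed
// set of handler threads (handlerCount=3), mirroring the log above.
public class CallQueueSketch {
  public static void main(String[] args) {
    BlockingQueue<Runnable> callQueue = new LinkedBlockingQueue<>(30);
    for (int i = 0; i < 3; i++) {
      Thread handler = new Thread(() -> {
        try {
          while (true) {
            callQueue.take().run(); // block until a call arrives, then execute it
          }
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt(); // shut down quietly
        }
      }, "default.FPBQ.Fifo.handler-" + i);
      handler.setDaemon(true);
      handler.start();
    }
    // offer() rather than put(): a full queue rejects the call instead of blocking.
    boolean accepted = callQueue.offer(() -> System.out.println("handled"));
    System.out.println("accepted=" + accepted);
  }
}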
2024-12-03T13:10:11,491 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-03T13:10:11,491 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-03T13:10:11,493 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:41619
2024-12-03T13:10:11,493 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:10:11,496 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:10:11,499 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:41619 connecting to ZooKeeper ensemble=127.0.0.1:63466
2024-12-03T13:10:11,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:416190x0, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-03T13:10:11,578 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41619-0x1019c9a231c0000 connected
2024-12-03T13:10:11,764 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
  at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
  at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
  at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
  at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
  at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
  ... 11 more
2024-12-03T13:10:11,795 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-03T13:10:11,797 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
  at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
  at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
  at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
  at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
  at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
  ... 11 more
2024-12-03T13:10:11,798 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-03T13:10:11,799 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-03T13:10:11,801 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41619
2024-12-03T13:10:11,804 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41619
2024-12-03T13:10:11,810 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41619
2024-12-03T13:10:11,817 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41619
2024-12-03T13:10:11,817 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41619
2024-12-03T13:10:11,818 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819, hbase.cluster.distributed=false
2024-12-03T13:10:11,853 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/35946657df09:0 server-side Connection retries=45
2024-12-03T13:10:11,853 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-03T13:10:11,853 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-03T13:10:11,853 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
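"Set watcher on znode that does not yet exist" above is the standard ZooKeeper idiom: an exists() call registers interest in a path even when it is absent, so a later create fires a NodeCreated event. A minimal sketch with the plain ZooKeeper client (connect string and path taken from the log; error handling trimmed; this is the generic API, not HBase's ZKUtil):

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Watch a znode that may not exist yet.
public class ExistsWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    Watcher watcher = (WatchedEvent event) -> {
      System.out.println("Received ZooKeeper Event, type=" + event.getType()
          + ", state=" + event.getState() + ", path=" + event.getPath());
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    };
    ZooKeeper zk = new ZooKeeper("127.0.0.1:63466", 30000, watcher);
    connected.await();
    // exists() on a missing path returns null but still registers the watch,
    // so a later create of /hbase/master triggers a NodeCreated event.
    System.out.println(zk.exists("/hbase/master", true));
  }
}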
2024-12-03T13:10:11,853 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-03T13:10:11,854 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-03T13:10:11,854 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-03T13:10:11,854 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-03T13:10:11,856 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:40569
2024-12-03T13:10:11,857 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-03T13:10:11,858 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-03T13:10:11,859 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:10:11,862 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:10:11,866 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:40569 connecting to ZooKeeper ensemble=127.0.0.1:63466
2024-12-03T13:10:11,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:405690x0, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-03T13:10:11,892 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40569-0x1019c9a231c0001 connected
2024-12-03T13:10:11,895 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40569-0x1019c9a231c0001, quorum=127.0.0.1:63466, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-03T13:10:11,909 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40569-0x1019c9a231c0001, quorum=127.0.0.1:63466, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-03T13:10:11,921 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40569-0x1019c9a231c0001, quorum=127.0.0.1:63466, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-03T13:10:11,927 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40569
2024-12-03T13:10:11,927 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40569
2024-12-03T13:10:11,937 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40569
2024-12-03T13:10:11,940 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40569
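"Allocating BlockCache size=880 MB, blockSize=64 KB" above is the region server sizing its read cache. The eviction idea, stripped down to a byte-bounded LRU over a LinkedHashMap (a toy: the real LruBlockCache is concurrent and multi-tiered, and this sketch over-counts if the same key is re-put):

import java.util.LinkedHashMap;
import java.util.Map;

// Toy byte-bounded LRU cache: a size budget over fixed-size blocks.
public class ByteLruCache extends LinkedHashMap<String, byte[]> {
  private final long capacityBytes;
  private long usedBytes;

  public ByteLruCache(long capacityBytes) {
    super(16, 0.75f, true); // access order, so iteration starts at the LRU entry
    this.capacityBytes = capacityBytes;
  }

  @Override
  public byte[] put(String key, byte[] block) {
    usedBytes += block.length;
    return super.put(key, block);
  }

  @Override
  protected boolean removeEldestEntry(Map.Entry<String, byte[]> eldest) {
    if (usedBytes <= capacityBytes) return false;
    usedBytes -= eldest.getValue().length;
    return true; // evict the least recently used block
  }

  public static void main(String[] args) {
    ByteLruCache cache = new ByteLruCache(128 * 1024); // 128 KB budget
    for (int i = 0; i < 4; i++) cache.put("block-" + i, new byte[64 * 1024]);
    System.out.println(cache.keySet()); // the two oldest blocks were evicted
  }
}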
2024-12-03T13:10:11,942 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40569
2024-12-03T13:10:11,949 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/35946657df09,41619,1733231411489
2024-12-03T13:10:11,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40569-0x1019c9a231c0001, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T13:10:11,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T13:10:11,967 DEBUG [M:0;35946657df09:41619 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;35946657df09:41619
2024-12-03T13:10:11,974 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/35946657df09,41619,1733231411489
2024-12-03T13:10:11,987 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40569-0x1019c9a231c0001, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-03T13:10:11,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40569-0x1019c9a231c0001, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:10:11,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-03T13:10:11,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:10:11,997 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-03T13:10:11,998 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-03T13:10:12,009 INFO [master/35946657df09:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/35946657df09,41619,1733231411489 from backup master directory
2024-12-03T13:10:12,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/35946657df09,41619,1733231411489
2024-12-03T13:10:12,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T13:10:12,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40569-0x1019c9a231c0001, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T13:10:12,022 WARN [master/35946657df09:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-03T13:10:12,022 INFO [master/35946657df09:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=35946657df09,41619,1733231411489
2024-12-03T13:10:12,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42247 is added to blk_1073741826_1002 (size=42)
2024-12-03T13:10:12,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38179 is added to blk_1073741826_1002 (size=42)
2024-12-03T13:10:12,131 DEBUG [master/35946657df09:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/hbase.id with ID: be0ef4f9-f781-47ad-a91f-ae2576f15740
2024-12-03T13:10:12,197 INFO [master/35946657df09:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:10:12,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
  at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
  at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
  at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
  at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
  at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
  ... 11 more
2024-12-03T13:10:12,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40569-0x1019c9a231c0001, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:10:12,221 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:10:12,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42247 is added to blk_1073741827_1003 (size=196)
2024-12-03T13:10:12,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38179 is added to blk_1073741827_1003 (size=196)
2024-12-03T13:10:12,275 INFO [master/35946657df09:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-03T13:10:12,276 INFO [master/35946657df09:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-03T13:10:12,278 INFO [master/35946657df09:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-03T13:10:12,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42247 is added to blk_1073741828_1004 (size=1189)
2024-12-03T13:10:12,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38179 is added to blk_1073741828_1004 (size=1189)
{'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/MasterData/data/master/store 2024-12-03T13:10:12,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42247 is added to blk_1073741829_1005 (size=34) 2024-12-03T13:10:12,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38179 is added to blk_1073741829_1005 (size=34) 2024-12-03T13:10:12,764 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:12,797 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:12,824 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T13:10:12,824 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T13:10:12,824 INFO [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T13:10:12,824 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
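The repeated "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" WARNs above come from RecoverLeaseFSUtils trying to reclaim the lease on WAL files left behind by a previous mini-cluster (note the different test-data directory, cd8f0fa8-..., and the hdfs://localhost:42201 NameNode): it probes DFSClient.isFileClosed via reflection, and the probe fails because that earlier cluster's client is already shut down. Stripped of the reflection and retry machinery, the underlying HDFS calls look roughly like the sketch below; class and method names other than the DistributedFileSystem API are mine, and this assumes a live FileSystem bound to the NameNode that owns the path.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      // Sketch only: RecoverLeaseFSUtils adds timeouts, pauses, and reflection
      // (for old-Hadoop compatibility) around these same two calls.
      static void recover(Configuration conf, Path wal) throws Exception {
        FileSystem fs = wal.getFileSystem(conf);
        if (!(fs instanceof DistributedFileSystem)) {
          return; // lease recovery only applies to HDFS
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        // Ask the NameNode to begin lease recovery; true means already closed.
        boolean closed = dfs.recoverLease(wal);
        while (!closed) {
          Thread.sleep(1000); // the real code uses a configurable pause
          // This is the call that blows up above once the DFSClient is closed.
          closed = dfs.isFileClosed(wal);
        }
      }
    }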
2024-12-03T13:10:12,824 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T13:10:12,824 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T13:10:12,824 INFO [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T13:10:12,824 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-03T13:10:12,825 WARN [master/35946657df09:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/MasterData/data/master/store/.initializing 2024-12-03T13:10:12,825 DEBUG [master/35946657df09:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/MasterData/WALs/35946657df09,41619,1733231411489 2024-12-03T13:10:12,828 INFO [master/35946657df09:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=35946657df09%2C41619%2C1733231411489, suffix=, logDir=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/MasterData/WALs/35946657df09,41619,1733231411489, archiveDir=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/MasterData/oldWALs, maxLogs=10 2024-12-03T13:10:12,829 INFO [master/35946657df09:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C41619%2C1733231411489.1733231412829 2024-12-03T13:10:12,923 INFO [master/35946657df09:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/MasterData/WALs/35946657df09,41619,1733231411489/35946657df09%2C41619%2C1733231411489.1733231412829 2024-12-03T13:10:12,923 DEBUG [master/35946657df09:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33231:33231),(127.0.0.1/127.0.0.1:42113:42113)] 2024-12-03T13:10:12,923 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-03T13:10:12,923 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T13:10:12,923 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T13:10:12,923 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T13:10:12,943 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 
1595e783b53d99cd5eef43b6debb2682 2024-12-03T13:10:12,967 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-03T13:10:12,967 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T13:10:12,979 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T13:10:12,979 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T13:10:12,987 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-03T13:10:12,987 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T13:10:12,995 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T13:10:12,995 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T13:10:13,011 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-03T13:10:13,011 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T13:10:13,023 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T13:10:13,023 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T13:10:13,041 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-03T13:10:13,041 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T13:10:13,051 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T13:10:13,057 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T13:10:13,062 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T13:10:13,069 DEBUG 
[master/35946657df09:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T13:10:13,070 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T13:10:13,092 DEBUG [master/35946657df09:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T13:10:13,093 INFO [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=854161, jitterRate=0.08612309396266937}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T13:10:13,093 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-03T13:10:13,094 INFO [master/35946657df09:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-03T13:10:13,108 DEBUG [master/35946657df09:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25fc02ca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T13:10:13,115 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-03T13:10:13,115 INFO [master/35946657df09:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-03T13:10:13,115 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-03T13:10:13,115 INFO [master/35946657df09:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-03T13:10:13,117 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-12-03T13:10:13,117 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec 2024-12-03T13:10:13,117 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-03T13:10:13,140 INFO [master/35946657df09:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
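The FlushLargeStoresPolicy line above is plain arithmetic: master:store has four column families (info, proc, rs, state) and a 128 MB flush size, and 134217728 / 4 = 33554432 bytes, the 32 MB fallback, which matches the flushSizeLowerBound=33554432 reported when the region opens. A hedged sketch of that fallback, using the property named in the log message; the exact lookup inside the policy may differ by version, and the class name here is mine.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Default region flush size: 128 MB.
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L);
        int families = 4; // info, proc, rs, state in master:store
        // No explicit lower bound set, so fall back to flushSize / #families.
        long lowerBound = conf.getLong(
            "hbase.hregion.percolumnfamilyflush.size.lower.bound",
            flushSize / families);
        System.out.println(lowerBound); // 33554432, i.e. 32 MB
      }
    }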
2024-12-03T13:10:13,143 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-03T13:10:13,193 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-03T13:10:13,194 INFO [master/35946657df09:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-03T13:10:13,195 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-03T13:10:13,203 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-03T13:10:13,204 INFO [master/35946657df09:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-03T13:10:13,205 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-03T13:10:13,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:13,214 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-03T13:10:13,219 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-03T13:10:13,229 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-03T13:10:13,234 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-03T13:10:13,244 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-03T13:10:13,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40569-0x1019c9a231c0001, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T13:10:13,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T13:10:13,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40569-0x1019c9a231c0001, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T13:10:13,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T13:10:13,254 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=35946657df09,41619,1733231411489, sessionid=0x1019c9a231c0000, setting cluster-up flag (Was=false) 2024-12-03T13:10:13,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40569-0x1019c9a231c0001, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T13:10:13,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T13:10:13,295 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-03T13:10:13,296 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=35946657df09,41619,1733231411489 2024-12-03T13:10:13,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40569-0x1019c9a231c0001, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T13:10:13,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T13:10:13,412 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-03T13:10:13,413 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=35946657df09,41619,1733231411489 2024-12-03T13:10:13,420 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-03T13:10:13,421 INFO [master/35946657df09:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-03T13:10:13,421 INFO [master/35946657df09:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-03T13:10:13,421 DEBUG [master/35946657df09:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 35946657df09,41619,1733231411489 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-03T13:10:13,421 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/35946657df09:0, corePoolSize=5, maxPoolSize=5 2024-12-03T13:10:13,421 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/35946657df09:0, corePoolSize=5, maxPoolSize=5 2024-12-03T13:10:13,422 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/35946657df09:0, corePoolSize=5, maxPoolSize=5 2024-12-03T13:10:13,422 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/35946657df09:0, corePoolSize=5, maxPoolSize=5 2024-12-03T13:10:13,422 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/35946657df09:0, corePoolSize=10, maxPoolSize=10 2024-12-03T13:10:13,422 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/35946657df09:0, corePoolSize=1, maxPoolSize=1 2024-12-03T13:10:13,422 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service 
name=MASTER_MERGE_OPERATIONS-master/35946657df09:0, corePoolSize=2, maxPoolSize=2 2024-12-03T13:10:13,422 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/35946657df09:0, corePoolSize=1, maxPoolSize=1 2024-12-03T13:10:13,430 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733231443430 2024-12-03T13:10:13,431 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-03T13:10:13,431 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-03T13:10:13,431 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-03T13:10:13,431 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-03T13:10:13,431 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-03T13:10:13,431 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-03T13:10:13,431 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-03T13:10:13,431 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-03T13:10:13,438 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
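The "Chore ScheduledChore name=... is enabled" lines (LogsCleaner here, HFileCleaner and others below) are periodic background tasks scheduled on a shared ChoreService. A minimal sketch of the pattern, assuming HBase's public ScheduledChore/ChoreService classes behave as their javadoc describes; the chore name, period, and Stoppable stub are illustrative only.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      // A trivial chore on a fixed period, like LogsCleaner (600000 ms).
      static final class HelloChore extends ScheduledChore {
        HelloChore(Stoppable stopper) {
          super("HelloChore", stopper, 600_000); // name, stopper, period in ms
        }
        @Override
        protected void chore() {
          System.out.println("periodic work runs here");
        }
      }

      public static void main(String[] args) {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        new ChoreService("demo").scheduleChore(new HelloChore(stopper));
      }
    }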
2024-12-03T13:10:13,439 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-03T13:10:13,439 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-03T13:10:13,440 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-03T13:10:13,441 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T13:10:13,441 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-03T13:10:13,442 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-03T13:10:13,442 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-03T13:10:13,450 DEBUG [master/35946657df09:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/35946657df09:0:becomeActiveMaster-HFileCleaner.large.0-1733231413442,5,FailOnTimeoutGroup] 2024-12-03T13:10:13,454 DEBUG [master/35946657df09:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/35946657df09:0:becomeActiveMaster-HFileCleaner.small.0-1733231413450,5,FailOnTimeoutGroup] 2024-12-03T13:10:13,454 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T13:10:13,454 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-03T13:10:13,454 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 
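The long descriptor dumps in this section (master:store earlier, hbase:meta just above) are rendered TableDescriptors. For reference, a descriptor with a family matching the logged 'info' settings (VERSIONS => '3', ROW_INDEX_V1 encoding, ROWCOL bloom filter, IN_MEMORY => 'true', 8 KB blocks) can be assembled with the HBase 2.x builder API, as in the sketch below; the table name "demo" is hypothetical, and this builds a user-table descriptor, not the internal meta one.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      public static void main(String[] args) {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8192)
                .build())
            .build();
        // toString() produces the same {NAME => 'info', ...} rendering seen above.
        System.out.println(td);
      }
    }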
2024-12-03T13:10:13,454 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-03T13:10:13,489 DEBUG [RS:0;35946657df09:40569 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;35946657df09:40569 2024-12-03T13:10:13,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42247 is added to blk_1073741831_1007 (size=1039) 2024-12-03T13:10:13,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38179 is added to blk_1073741831_1007 (size=1039) 2024-12-03T13:10:13,506 INFO [RS:0;35946657df09:40569 {}] regionserver.HRegionServer(1008): ClusterId : be0ef4f9-f781-47ad-a91f-ae2576f15740 2024-12-03T13:10:13,506 DEBUG [RS:0;35946657df09:40569 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T13:10:13,515 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-03T13:10:13,515 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819 2024-12-03T13:10:13,539 DEBUG [RS:0;35946657df09:40569 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T13:10:13,540 DEBUG [RS:0;35946657df09:40569 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T13:10:13,575 DEBUG [RS:0;35946657df09:40569 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T13:10:13,575 DEBUG [RS:0;35946657df09:40569 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@676d4568, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T13:10:13,579 DEBUG [RS:0;35946657df09:40569 {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c8f1a98, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=35946657df09/172.17.0.2:0 2024-12-03T13:10:13,579 INFO [RS:0;35946657df09:40569 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-03T13:10:13,579 INFO [RS:0;35946657df09:40569 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-03T13:10:13,579 DEBUG [RS:0;35946657df09:40569 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-03T13:10:13,588 INFO [RS:0;35946657df09:40569 {}] regionserver.HRegionServer(3073): reportForDuty to master=35946657df09,41619,1733231411489 with isa=35946657df09/172.17.0.2:40569, startcode=1733231411853 2024-12-03T13:10:13,588 DEBUG [RS:0;35946657df09:40569 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T13:10:13,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42247 is added to blk_1073741832_1008 (size=32) 2024-12-03T13:10:13,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38179 is added to blk_1073741832_1008 (size=32) 2024-12-03T13:10:13,601 INFO [RS-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41763, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T13:10:13,607 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41619 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 35946657df09,40569,1733231411853 2024-12-03T13:10:13,607 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41619 {}] master.ServerManager(486): Registering regionserver=35946657df09,40569,1733231411853 2024-12-03T13:10:13,623 DEBUG [RS:0;35946657df09:40569 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819 2024-12-03T13:10:13,623 DEBUG [RS:0;35946657df09:40569 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:36523 2024-12-03T13:10:13,624 DEBUG [RS:0;35946657df09:40569 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-03T13:10:13,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T13:10:13,659 DEBUG [RS:0;35946657df09:40569 {}] zookeeper.ZKUtil(111): regionserver:40569-0x1019c9a231c0001, quorum=127.0.0.1:63466, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/35946657df09,40569,1733231411853 2024-12-03T13:10:13,659 WARN [RS:0;35946657df09:40569 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
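The ZKWatcher "Received ZooKeeper Event" lines throughout this section, and the "Set watcher on existing znode=/hbase/rs/..." line just above, follow ZooKeeper's one-shot watch pattern: read a znode with a watch attached, receive a single NodeCreated/NodeDeleted/NodeChildrenChanged callback, then re-register. A bare sketch against the plain ZooKeeper client; the quorum address mirrors the log, the class name is mine, and production code (like ZKUtil) also handles session events and KeeperException retries.

    import java.util.List;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class WatchSketch implements Watcher {
      private ZooKeeper zk;

      void start() throws Exception {
        zk = new ZooKeeper("127.0.0.1:63466", 30_000, this);
        watchChildren();
      }

      private void watchChildren() throws Exception {
        // Passing 'this' arms the watch; ZooKeeper watches fire only once.
        List<String> servers = zk.getChildren("/hbase/rs", this);
        System.out.println("live regionservers: " + servers);
      }

      @Override
      public void process(WatchedEvent event) {
        if (event.getType() == Event.EventType.NodeChildrenChanged) {
          try {
            watchChildren(); // re-register after every callback
          } catch (Exception e) {
            e.printStackTrace();
          }
        }
      }
    }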
2024-12-03T13:10:13,659 INFO [RS:0;35946657df09:40569 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T13:10:13,659 DEBUG [RS:0;35946657df09:40569 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/WALs/35946657df09,40569,1733231411853 2024-12-03T13:10:13,671 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [35946657df09,40569,1733231411853] 2024-12-03T13:10:13,716 DEBUG [RS:0;35946657df09:40569 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-03T13:10:13,716 INFO [RS:0;35946657df09:40569 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T13:10:13,722 INFO [RS:0;35946657df09:40569 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T13:10:13,727 INFO [RS:0;35946657df09:40569 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T13:10:13,727 INFO [RS:0;35946657df09:40569 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T13:10:13,732 INFO [RS:0;35946657df09:40569 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-03T13:10:13,742 INFO [RS:0;35946657df09:40569 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
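The MemStoreFlusher line above encodes two fractions: the global memstore limit is a share of the region server heap (default 0.4), and the low-water mark is by default 95% of that limit, so 880 MB x 0.95 = 836 MB, exactly as logged (implying roughly a 2.2 GB test heap). A hedged sketch with the configuration keys these values are commonly read from; treat the key names and the heap figure as assumptions if your version differs.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        long heapBytes = 2200L * 1024 * 1024; // assumed ~2.2 GB test heap
        float limitFrac = conf.getFloat("hbase.regionserver.global.memstore.size", 0.4f);
        float lowerFrac = conf.getFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        long limit = (long) (heapBytes * limitFrac); // ~880 MB here
        long lowMark = (long) (limit * lowerFrac);   // 880 MB * 0.95 = 836 MB
        System.out.println(limit + " / " + lowMark);
      }
    }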
2024-12-03T13:10:13,742 DEBUG [RS:0;35946657df09:40569 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1 2024-12-03T13:10:13,742 DEBUG [RS:0;35946657df09:40569 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1 2024-12-03T13:10:13,742 DEBUG [RS:0;35946657df09:40569 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1 2024-12-03T13:10:13,742 DEBUG [RS:0;35946657df09:40569 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1 2024-12-03T13:10:13,743 DEBUG [RS:0;35946657df09:40569 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1 2024-12-03T13:10:13,743 DEBUG [RS:0;35946657df09:40569 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/35946657df09:0, corePoolSize=2, maxPoolSize=2 2024-12-03T13:10:13,743 DEBUG [RS:0;35946657df09:40569 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1 2024-12-03T13:10:13,743 DEBUG [RS:0;35946657df09:40569 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1 2024-12-03T13:10:13,743 DEBUG [RS:0;35946657df09:40569 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1 2024-12-03T13:10:13,743 DEBUG [RS:0;35946657df09:40569 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1 2024-12-03T13:10:13,743 DEBUG [RS:0;35946657df09:40569 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1 2024-12-03T13:10:13,743 DEBUG [RS:0;35946657df09:40569 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/35946657df09:0, corePoolSize=3, maxPoolSize=3 2024-12-03T13:10:13,743 DEBUG [RS:0;35946657df09:40569 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/35946657df09:0, corePoolSize=3, maxPoolSize=3 2024-12-03T13:10:13,758 INFO [RS:0;35946657df09:40569 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T13:10:13,759 INFO [RS:0;35946657df09:40569 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T13:10:13,759 INFO [RS:0;35946657df09:40569 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T13:10:13,759 INFO [RS:0;35946657df09:40569 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T13:10:13,759 INFO [RS:0;35946657df09:40569 {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,40569,1733231411853-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
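The block of "Starting executor service name=..., corePoolSize=N, maxPoolSize=N" lines above is one dedicated thread pool per operation type (open region, close region, log replay, snapshot, and so on), which keeps slow operations of one kind from starving another. In plain java.util.concurrent terms each amounts to roughly the following; this is a generic JDK sketch, not HBase's actual ExecutorService wrapper.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class PoolSketch {
      public static void main(String[] args) {
        // Mirrors e.g. RS_OPEN_REGION: corePoolSize=1, maxPoolSize=1,
        // unbounded queue, idle core threads allowed to time out.
        ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
            1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        openRegionPool.allowCoreThreadTimeOut(true);
        openRegionPool.execute(() -> System.out.println("open region task"));
        openRegionPool.shutdown();
      }
    }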
2024-12-03T13:10:13,766 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:10:13,798 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:10:13,803 INFO [RS:0;35946657df09:40569 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-12-03T13:10:13,803 INFO [RS:0;35946657df09:40569 {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,40569,1733231411853-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-03T13:10:13,834 INFO [RS:0;35946657df09:40569 {}] regionserver.Replication(204): 35946657df09,40569,1733231411853 started
2024-12-03T13:10:13,835 INFO [RS:0;35946657df09:40569 {}] regionserver.HRegionServer(1767): Serving as 35946657df09,40569,1733231411853, RpcServer on 35946657df09/172.17.0.2:40569, sessionid=0x1019c9a231c0001
2024-12-03T13:10:13,835 DEBUG [RS:0;35946657df09:40569 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-12-03T13:10:13,835 DEBUG [RS:0;35946657df09:40569 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 35946657df09,40569,1733231411853
2024-12-03T13:10:13,835 DEBUG [RS:0;35946657df09:40569 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '35946657df09,40569,1733231411853'
2024-12-03T13:10:13,835 DEBUG [RS:0;35946657df09:40569 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-12-03T13:10:13,844 DEBUG [RS:0;35946657df09:40569 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-12-03T13:10:13,848 DEBUG [RS:0;35946657df09:40569 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-12-03T13:10:13,848 DEBUG [RS:0;35946657df09:40569 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-12-03T13:10:13,848 DEBUG [RS:0;35946657df09:40569 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 35946657df09,40569,1733231411853
2024-12-03T13:10:13,848 DEBUG [RS:0;35946657df09:40569 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '35946657df09,40569,1733231411853'
2024-12-03T13:10:13,848 DEBUG [RS:0;35946657df09:40569 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-12-03T13:10:13,852 DEBUG [RS:0;35946657df09:40569 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-03T13:10:13,858 DEBUG [RS:0;35946657df09:40569 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-03T13:10:13,858 INFO [RS:0;35946657df09:40569 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-03T13:10:13,859 INFO [RS:0;35946657df09:40569 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-12-03T13:10:13,961 INFO [RS:0;35946657df09:40569 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=35946657df09%2C40569%2C1733231411853, suffix=, logDir=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/WALs/35946657df09,40569,1733231411853, archiveDir=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/oldWALs, maxLogs=32
2024-12-03T13:10:13,962 INFO [RS:0;35946657df09:40569 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C40569%2C1733231411853.1733231413962
2024-12-03T13:10:14,013 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T13:10:14,023 INFO [RS:0;35946657df09:40569 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/WALs/35946657df09,40569,1733231411853/35946657df09%2C40569%2C1733231411853.1733231413962
2024-12-03T13:10:14,023 DEBUG [RS:0;35946657df09:40569 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42113:42113),(127.0.0.1/127.0.0.1:33231:33231)]
2024-12-03T13:10:14,027 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-03T13:10:14,039 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-03T13:10:14,039 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:10:14,040 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-03T13:10:14,040 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-03T13:10:14,042 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-03T13:10:14,042 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:10:14,045 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-03T13:10:14,045 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-03T13:10:14,050 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-03T13:10:14,050 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:10:14,051 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-03T13:10:14,055 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/hbase/meta/1588230740
2024-12-03T13:10:14,060 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/hbase/meta/1588230740
2024-12-03T13:10:14,067 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-03T13:10:14,070 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-03T13:10:14,078 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-03T13:10:14,081 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=690794, jitterRate=-0.12161065638065338}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-03T13:10:14,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-12-03T13:10:14,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-03T13:10:14,082 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-03T13:10:14,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-03T13:10:14,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-03T13:10:14,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-03T13:10:14,084 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-03T13:10:14,084 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-03T13:10:14,092 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta
2024-12-03T13:10:14,092 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta
2024-12-03T13:10:14,093 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-12-03T13:10:14,102 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-03T13:10:14,107 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-12-03T13:10:14,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:10:14,257 DEBUG [35946657df09:41619 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-12-03T13:10:14,257 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=35946657df09,40569,1733231411853
2024-12-03T13:10:14,258 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 35946657df09,40569,1733231411853, state=OPENING
2024-12-03T13:10:14,270 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-12-03T13:10:14,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40569-0x1019c9a231c0001, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:10:14,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:10:14,279 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-03T13:10:14,280 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-03T13:10:14,280 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=35946657df09,40569,1733231411853}]
2024-12-03T13:10:14,432 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 35946657df09,40569,1733231411853
2024-12-03T13:10:14,433 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-03T13:10:14,435 INFO [RS-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57902, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-03T13:10:14,442 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740
2024-12-03T13:10:14,442 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-03T13:10:14,445 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=35946657df09%2C40569%2C1733231411853.meta, suffix=.meta, logDir=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/WALs/35946657df09,40569,1733231411853, archiveDir=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/oldWALs, maxLogs=32
2024-12-03T13:10:14,446 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C40569%2C1733231411853.meta.1733231414446.meta
2024-12-03T13:10:14,513 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/WALs/35946657df09,40569,1733231411853/35946657df09%2C40569%2C1733231411853.meta.1733231414446.meta
2024-12-03T13:10:14,513 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33231:33231),(127.0.0.1/127.0.0.1:42113:42113)]
2024-12-03T13:10:14,513 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-12-03T13:10:14,513 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-12-03T13:10:14,514 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-12-03T13:10:14,514 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
2024-12-03T13:10:14,514 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-12-03T13:10:14,514 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T13:10:14,514 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740
2024-12-03T13:10:14,514 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740
2024-12-03T13:10:14,520 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-03T13:10:14,530 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-03T13:10:14,530 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:10:14,531 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-03T13:10:14,532 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-03T13:10:14,535 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-03T13:10:14,535 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:10:14,539 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-03T13:10:14,539 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-03T13:10:14,543 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-03T13:10:14,544 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:10:14,550 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-03T13:10:14,554 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/hbase/meta/1588230740
2024-12-03T13:10:14,571 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/hbase/meta/1588230740
2024-12-03T13:10:14,591 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-03T13:10:14,611 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-03T13:10:14,619 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=717596, jitterRate=-0.08752952516078949}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-03T13:10:14,619 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-12-03T13:10:14,631 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733231414432
2024-12-03T13:10:14,642 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740
2024-12-03T13:10:14,642 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740
2024-12-03T13:10:14,651 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=35946657df09,40569,1733231411853
2024-12-03T13:10:14,659 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 35946657df09,40569,1733231411853, state=OPEN
2024-12-03T13:10:14,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-03T13:10:14,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40569-0x1019c9a231c0001, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-03T13:10:14,702 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-03T13:10:14,702 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-03T13:10:14,704 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2
2024-12-03T13:10:14,704 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=35946657df09,40569,1733231411853 in 421 msec
2024-12-03T13:10:14,707 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1
2024-12-03T13:10:14,707 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 612 msec
2024-12-03T13:10:14,709 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.2880 sec
2024-12-03T13:10:14,709 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733231414709, completionTime=-1
2024-12-03T13:10:14,709 INFO [master/35946657df09:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running
2024-12-03T13:10:14,709 DEBUG [master/35946657df09:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster...
2024-12-03T13:10:14,710 DEBUG [hconnection-0xe28f46b-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-03T13:10:14,712 INFO [RS-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57910, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-03T13:10:14,716 INFO [master/35946657df09:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1
2024-12-03T13:10:14,716 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733231474716
2024-12-03T13:10:14,716 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733231534716
2024-12-03T13:10:14,716 INFO [master/35946657df09:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 6 msec
2024-12-03T13:10:14,768 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:10:14,779 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,41619,1733231411489-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-03T13:10:14,779 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,41619,1733231411489-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-03T13:10:14,779 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,41619,1733231411489-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-03T13:10:14,780 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-35946657df09:41619, period=300000, unit=MILLISECONDS is enabled.
2024-12-03T13:10:14,780 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-12-03T13:10:14,780 INFO [master/35946657df09:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating...
2024-12-03T13:10:14,780 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-03T13:10:14,787 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace
2024-12-03T13:10:14,789 DEBUG [master/35946657df09:0.Chore.1 {}] janitor.CatalogJanitor(179):
2024-12-03T13:10:14,790 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION
2024-12-03T13:10:14,790 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:10:14,795 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-03T13:10:14,802 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:10:14,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38179 is added to blk_1073741835_1011 (size=358)
2024-12-03T13:10:14,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42247 is added to blk_1073741835_1011 (size=358)
2024-12-03T13:10:14,845 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => d9f2c5721f7a1570faf5a2226601b62a, NAME => 'hbase:namespace,,1733231414780.d9f2c5721f7a1570faf5a2226601b62a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819
2024-12-03T13:10:14,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38179 is added to blk_1073741836_1012 (size=42)
2024-12-03T13:10:14,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42247 is added to blk_1073741836_1012 (size=42)
2024-12-03T13:10:14,895 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733231414780.d9f2c5721f7a1570faf5a2226601b62a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T13:10:14,895 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing d9f2c5721f7a1570faf5a2226601b62a, disabling compactions & flushes
2024-12-03T13:10:14,895 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733231414780.d9f2c5721f7a1570faf5a2226601b62a.
2024-12-03T13:10:14,895 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733231414780.d9f2c5721f7a1570faf5a2226601b62a.
2024-12-03T13:10:14,895 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733231414780.d9f2c5721f7a1570faf5a2226601b62a. after waiting 0 ms
2024-12-03T13:10:14,895 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733231414780.d9f2c5721f7a1570faf5a2226601b62a.
2024-12-03T13:10:14,895 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733231414780.d9f2c5721f7a1570faf5a2226601b62a.
2024-12-03T13:10:14,895 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for d9f2c5721f7a1570faf5a2226601b62a:
2024-12-03T13:10:14,905 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META
2024-12-03T13:10:14,905 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733231414780.d9f2c5721f7a1570faf5a2226601b62a.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733231414905"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733231414905"}]},"ts":"1733231414905"}
2024-12-03T13:10:14,917 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta.
2024-12-03T13:10:14,921 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-03T13:10:14,921 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733231414921"}]},"ts":"1733231414921"}
2024-12-03T13:10:14,926 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta
2024-12-03T13:10:14,995 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=d9f2c5721f7a1570faf5a2226601b62a, ASSIGN}]
2024-12-03T13:10:14,997 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=d9f2c5721f7a1570faf5a2226601b62a, ASSIGN
2024-12-03T13:10:14,998 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=d9f2c5721f7a1570faf5a2226601b62a, ASSIGN; state=OFFLINE, location=35946657df09,40569,1733231411853; forceNewPlan=false, retain=false
2024-12-03T13:10:15,148 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=d9f2c5721f7a1570faf5a2226601b62a, regionState=OPENING, regionLocation=35946657df09,40569,1733231411853
2024-12-03T13:10:15,151 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure d9f2c5721f7a1570faf5a2226601b62a, server=35946657df09,40569,1733231411853}]
2024-12-03T13:10:15,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:10:15,304 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 35946657df09,40569,1733231411853
2024-12-03T13:10:15,319 INFO [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733231414780.d9f2c5721f7a1570faf5a2226601b62a.
2024-12-03T13:10:15,319 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => d9f2c5721f7a1570faf5a2226601b62a, NAME => 'hbase:namespace,,1733231414780.d9f2c5721f7a1570faf5a2226601b62a.', STARTKEY => '', ENDKEY => ''}
2024-12-03T13:10:15,319 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace d9f2c5721f7a1570faf5a2226601b62a
2024-12-03T13:10:15,319 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733231414780.d9f2c5721f7a1570faf5a2226601b62a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T13:10:15,319 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for d9f2c5721f7a1570faf5a2226601b62a
2024-12-03T13:10:15,319 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for d9f2c5721f7a1570faf5a2226601b62a
2024-12-03T13:10:15,323 INFO [StoreOpener-d9f2c5721f7a1570faf5a2226601b62a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region d9f2c5721f7a1570faf5a2226601b62a
2024-12-03T13:10:15,339 INFO [StoreOpener-d9f2c5721f7a1570faf5a2226601b62a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d9f2c5721f7a1570faf5a2226601b62a columnFamilyName info
2024-12-03T13:10:15,339 DEBUG [StoreOpener-d9f2c5721f7a1570faf5a2226601b62a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:10:15,352 INFO [StoreOpener-d9f2c5721f7a1570faf5a2226601b62a-1 {}] regionserver.HStore(327): Store=d9f2c5721f7a1570faf5a2226601b62a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-03T13:10:15,355 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/hbase/namespace/d9f2c5721f7a1570faf5a2226601b62a
2024-12-03T13:10:15,359 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/hbase/namespace/d9f2c5721f7a1570faf5a2226601b62a
2024-12-03T13:10:15,361 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for d9f2c5721f7a1570faf5a2226601b62a
2024-12-03T13:10:15,368 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/hbase/namespace/d9f2c5721f7a1570faf5a2226601b62a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-03T13:10:15,373 INFO [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened d9f2c5721f7a1570faf5a2226601b62a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=753910, jitterRate=-0.04135508835315704}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-03T13:10:15,373 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for d9f2c5721f7a1570faf5a2226601b62a:
2024-12-03T13:10:15,375 INFO [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733231414780.d9f2c5721f7a1570faf5a2226601b62a., pid=6, masterSystemTime=1733231415304
2024-12-03T13:10:15,377 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733231414780.d9f2c5721f7a1570faf5a2226601b62a.
2024-12-03T13:10:15,377 INFO [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733231414780.d9f2c5721f7a1570faf5a2226601b62a.
2024-12-03T13:10:15,379 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=d9f2c5721f7a1570faf5a2226601b62a, regionState=OPEN, openSeqNum=2, regionLocation=35946657df09,40569,1733231411853
2024-12-03T13:10:15,385 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5
2024-12-03T13:10:15,388 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4
2024-12-03T13:10:15,389 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=d9f2c5721f7a1570faf5a2226601b62a, ASSIGN in 390 msec
2024-12-03T13:10:15,390 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-03T13:10:15,390 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733231415390"}]},"ts":"1733231415390"}
2024-12-03T13:10:15,390 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure d9f2c5721f7a1570faf5a2226601b62a, server=35946657df09,40569,1733231411853 in 232 msec
2024-12-03T13:10:15,394 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta
2024-12-03T13:10:15,446 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION
2024-12-03T13:10:15,448 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 666 msec
2024-12-03T13:10:15,489 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace
2024-12-03T13:10:15,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace
2024-12-03T13:10:15,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40569-0x1019c9a231c0001, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:10:15,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:10:15,500 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default
2024-12-03T13:10:15,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace
2024-12-03T13:10:15,531 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 30 msec
2024-12-03T13:10:15,543 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase
2024-12-03T13:10:15,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace
2024-12-03T13:10:15,573 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 29 msec
2024-12-03T13:10:15,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default
2024-12-03T13:10:15,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase
2024-12-03T13:10:15,612 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 3.590sec
2024-12-03T13:10:15,612 INFO [master/35946657df09:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-12-03T13:10:15,612 INFO [master/35946657df09:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-12-03T13:10:15,612 INFO [master/35946657df09:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-12-03T13:10:15,612 INFO [master/35946657df09:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-12-03T13:10:15,612 INFO [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-12-03T13:10:15,612 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,41619,1733231411489-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-03T13:10:15,612 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,41619,1733231411489-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-12-03T13:10:15,630 DEBUG [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds
2024-12-03T13:10:15,630 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-12-03T13:10:15,630 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,41619,1733231411489-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-03T13:10:15,681 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6954a985 to 127.0.0.1:63466 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@266f2862
2024-12-03T13:10:15,708 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@517f8411, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-03T13:10:15,715 DEBUG [hconnection-0x4eb030ce-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-03T13:10:15,717 INFO [RS-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57926, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-03T13:10:15,720 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=35946657df09,41619,1733231411489
2024-12-03T13:10:15,720 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:10:15,725 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false
2024-12-03T13:10:15,729 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false
2024-12-03T13:10:15,739 INFO [RS-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48628, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-12-03T13:10:15,756 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41619 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions.
2024-12-03T13:10:15,756 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41619 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing.
2024-12-03T13:10:15,757 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41619 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-03T13:10:15,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41619 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-03T13:10:15,769 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:10:15,787 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION
2024-12-03T13:10:15,787 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:10:15,787 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41619 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 9
2024-12-03T13:10:15,795 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-03T13:10:15,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41619 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9
2024-12-03T13:10:15,803 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
    [identical InvocationTargetException / "Filesystem closed" stack trace as at 13:10:15,769; duplicate elided]
2024-12-03T13:10:15,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38179 is added to blk_1073741837_1013 (size=405)
2024-12-03T13:10:15,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42247 is added to blk_1073741837_1013 (size=405)
2024-12-03T13:10:15,863 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => bc6ae92ebd989e3ee03882838574c1f7, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819
2024-12-03T13:10:15,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38179 is added to blk_1073741838_1014 (size=88)
2024-12-03T13:10:15,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42247 is added to blk_1073741838_1014 (size=88)
2024-12-03T13:10:15,928 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(894): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T13:10:15,928 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1681): Closing bc6ae92ebd989e3ee03882838574c1f7, disabling compactions & flushes
2024-12-03T13:10:15,928 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7.
2024-12-03T13:10:15,928 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7.
2024-12-03T13:10:15,928 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7. after waiting 0 ms
2024-12-03T13:10:15,928 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7.
2024-12-03T13:10:15,928 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7.
2024-12-03T13:10:15,928 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1635): Region close journal for bc6ae92ebd989e3ee03882838574c1f7:
2024-12-03T13:10:15,931 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META
2024-12-03T13:10:15,932 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1733231415931"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733231415931"}]},"ts":"1733231415931"}
2024-12-03T13:10:15,933 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta.
2024-12-03T13:10:15,938 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-03T13:10:15,938 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733231415938"}]},"ts":"1733231415938"}
2024-12-03T13:10:15,947 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta
2024-12-03T13:10:15,962 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=bc6ae92ebd989e3ee03882838574c1f7, ASSIGN}]
2024-12-03T13:10:15,963 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=bc6ae92ebd989e3ee03882838574c1f7, ASSIGN
2024-12-03T13:10:15,965 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=bc6ae92ebd989e3ee03882838574c1f7, ASSIGN; state=OFFLINE, location=35946657df09,40569,1733231411853; forceNewPlan=false, retain=false
2024-12-03T13:10:16,115 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=bc6ae92ebd989e3ee03882838574c1f7, regionState=OPENING, regionLocation=35946657df09,40569,1733231411853
2024-12-03T13:10:16,117 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure bc6ae92ebd989e3ee03882838574c1f7, server=35946657df09,40569,1733231411853}]
2024-12-03T13:10:16,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
    [identical InvocationTargetException / "Filesystem closed" stack trace as at 13:10:15,769; duplicate elided]
2024-12-03T13:10:16,269 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 35946657df09,40569,1733231411853
2024-12-03T13:10:16,279 INFO [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7.
2024-12-03T13:10:16,279 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => bc6ae92ebd989e3ee03882838574c1f7, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7.', STARTKEY => '', ENDKEY => ''}
2024-12-03T13:10:16,279 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling bc6ae92ebd989e3ee03882838574c1f7
2024-12-03T13:10:16,279 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T13:10:16,280 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for bc6ae92ebd989e3ee03882838574c1f7
2024-12-03T13:10:16,280 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for bc6ae92ebd989e3ee03882838574c1f7
2024-12-03T13:10:16,308 INFO [StoreOpener-bc6ae92ebd989e3ee03882838574c1f7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region bc6ae92ebd989e3ee03882838574c1f7
2024-12-03T13:10:16,327 INFO [StoreOpener-bc6ae92ebd989e3ee03882838574c1f7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bc6ae92ebd989e3ee03882838574c1f7 columnFamilyName info
2024-12-03T13:10:16,327 DEBUG [StoreOpener-bc6ae92ebd989e3ee03882838574c1f7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:10:16,331 INFO [StoreOpener-bc6ae92ebd989e3ee03882838574c1f7-1 {}] regionserver.HStore(327): Store=bc6ae92ebd989e3ee03882838574c1f7/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-03T13:10:16,342 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7
2024-12-03T13:10:16,348 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7
2024-12-03T13:10:16,371 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for bc6ae92ebd989e3ee03882838574c1f7
2024-12-03T13:10:16,390 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-03T13:10:16,393 INFO [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened bc6ae92ebd989e3ee03882838574c1f7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=797670, jitterRate=0.014290258288383484}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-03T13:10:16,394 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for bc6ae92ebd989e3ee03882838574c1f7:
2024-12-03T13:10:16,398 INFO [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7., pid=11, masterSystemTime=1733231416269
2024-12-03T13:10:16,414 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7.
2024-12-03T13:10:16,414 INFO [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7.
2024-12-03T13:10:16,420 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=bc6ae92ebd989e3ee03882838574c1f7, regionState=OPEN, openSeqNum=2, regionLocation=35946657df09,40569,1733231411853
2024-12-03T13:10:16,424 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41619 {}] assignment.AssignmentManager(1526): Unable to acquire lock for regionNode state=OPEN, location=35946657df09,40569,1733231411853, table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=bc6ae92ebd989e3ee03882838574c1f7. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now.
2024-12-03T13:10:16,442 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-03T13:10:16,442 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure bc6ae92ebd989e3ee03882838574c1f7, server=35946657df09,40569,1733231411853 in 309 msec 2024-12-03T13:10:16,451 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-03T13:10:16,452 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=bc6ae92ebd989e3ee03882838574c1f7, ASSIGN in 480 msec 2024-12-03T13:10:16,453 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T13:10:16,453 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733231416453"}]},"ts":"1733231416453"} 2024-12-03T13:10:16,457 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-12-03T13:10:16,505 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T13:10:16,507 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 748 msec 2024-12-03T13:10:16,769 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:16,804 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:17,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:17,770 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:17,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:18,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:18,771 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:10:18,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:19,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:19,514 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:10:19,515 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:10:19,515 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:10:19,515 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:10:19,533 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:10:19,533 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:10:19,533 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:10:19,534 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:10:19,534 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:10:19,534 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:10:19,537 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:10:19,537 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:10:19,538 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:10:19,540 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:10:19,771 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:19,806 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:20,045 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T13:10:20,047 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:10:20,047 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:10:20,048 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:10:20,048 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:10:20,076 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:10:20,077 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:10:20,077 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:10:20,077 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:10:20,077 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:10:20,078 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:10:20,083 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:10:20,083 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:10:20,084 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:10:20,087 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:10:20,095 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-03T13:10:20,096 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-03T13:10:20,096 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-12-03T13:10:20,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:10:20,772 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:20,807 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
2024-12-03T13:10:21,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
2024-12-03T13:10:21,773 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
2024-12-03T13:10:21,807 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
2024-12-03T13:10:22,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
2024-12-03T13:10:22,774 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
2024-12-03T13:10:22,808 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
2024-12-03T13:10:22,813 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-03T13:10:22,813 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer
2024-12-03T13:10:23,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
2024-12-03T13:10:23,774 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
2024-12-03T13:10:23,809 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
2024-12-03T13:10:24,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
2024-12-03T13:10:24,775 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
2024-12-03T13:10:24,809 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
2024-12-03T13:10:25,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
2024-12-03T13:10:25,776 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
2024-12-03T13:10:25,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41619 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9
2024-12-03T13:10:25,804 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 9 completed
2024-12-03T13:10:25,807 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-03T13:10:25,807 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7.
2024-12-03T13:10:25,810 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
2024-12-03T13:10:25,816 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41619 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush hbase:namespace
2024-12-03T13:10:25,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41619 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=hbase:namespace
2024-12-03T13:10:25,824 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=hbase:namespace execute state=FLUSH_TABLE_PREPARE
2024-12-03T13:10:25,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41619 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12
2024-12-03T13:10:25,825 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=hbase:namespace execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-03T13:10:25,827 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-03T13:10:25,990 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 35946657df09,40569,1733231411853
2024-12-03T13:10:25,991 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40569 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13
2024-12-03T13:10:25,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on hbase:namespace,,1733231414780.d9f2c5721f7a1570faf5a2226601b62a.
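The flush of hbase:namespace recorded above is a client-initiated admin operation ("Client=jenkins//172.17.0.2 flush hbase:namespace") that the master runs as FlushTableProcedure pid=12 with one FlushRegionProcedure subtask per region. A minimal sketch of issuing the same request through the HBase client API (the configuration source is an assumption; the log does not show how the test client was configured):

// Illustrative client-side sketch of the admin flush that starts the
// FlushTableProcedure sequence logged above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushNamespaceExample {
  public static void main(String[] args) throws Exception {
    // Reads hbase-site.xml from the classpath (assumed deployment detail).
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // The master stores a FlushTableProcedure for the table and fans out
      // one FlushRegionProcedure per region, as seen with pid=12/pid=13.
      admin.flush(TableName.valueOf("hbase:namespace"));
    }
  }
}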
2024-12-03T13:10:25,992 INFO [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing d9f2c5721f7a1570faf5a2226601b62a 1/1 column families, dataSize=78 B heapSize=488 B
2024-12-03T13:10:26,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/hbase/namespace/d9f2c5721f7a1570faf5a2226601b62a/.tmp/info/9818d2dd60ad487792d98ba8377a8f38 is 45, key is default/info:d/1733231415505/Put/seqid=0
2024-12-03T13:10:26,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42247 is added to blk_1073741839_1015 (size=5037)
2024-12-03T13:10:26,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38179 is added to blk_1073741839_1015 (size=5037)
2024-12-03T13:10:26,051 INFO [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/hbase/namespace/d9f2c5721f7a1570faf5a2226601b62a/.tmp/info/9818d2dd60ad487792d98ba8377a8f38
2024-12-03T13:10:26,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/hbase/namespace/d9f2c5721f7a1570faf5a2226601b62a/.tmp/info/9818d2dd60ad487792d98ba8377a8f38 as hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/hbase/namespace/d9f2c5721f7a1570faf5a2226601b62a/info/9818d2dd60ad487792d98ba8377a8f38
2024-12-03T13:10:26,088 INFO [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/hbase/namespace/d9f2c5721f7a1570faf5a2226601b62a/info/9818d2dd60ad487792d98ba8377a8f38, entries=2, sequenceid=6, filesize=4.9 K
2024-12-03T13:10:26,090 INFO [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for d9f2c5721f7a1570faf5a2226601b62a in 98ms, sequenceid=6, compaction requested=false
2024-12-03T13:10:26,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for d9f2c5721f7a1570faf5a2226601b62a:
2024-12-03T13:10:26,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on hbase:namespace,,1733231414780.d9f2c5721f7a1570faf5a2226601b62a.
2024-12-03T13:10:26,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13
2024-12-03T13:10:26,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41619 {}] master.HMaster(4106): Remote procedure done, pid=13
2024-12-03T13:10:26,099 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12
2024-12-03T13:10:26,099 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 270 msec
2024-12-03T13:10:26,103 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=hbase:namespace in 283 msec
2024-12-03T13:10:26,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
2024-12-03T13:10:26,776 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
2024-12-03T13:10:26,811 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
2024-12-03T13:10:27,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
2024-12-03T13:10:27,777 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
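Because every retry logs a full stack trace at WARN, a run like this one accumulates hundreds of identical traces. If the noise is unwanted in test output, the logger can be raised above WARN at runtime with the Log4j 2 Configurator, equivalent to a logger entry in the properties configuration this test suite uses; whether suppressing the warning is appropriate depends on the test, so this is only a sketch:

// Illustrative sketch: quiet the recurring lease-recovery WARN for the
// current LoggerContext using the Log4j 2 Configurator.
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

public class QuietLeaseRecoveryLogs {
  public static void main(String[] args) {
    // Only ERROR and above from RecoverLeaseFSUtils will be logged after this.
    Configurator.setLevel("org.apache.hadoop.hbase.util.RecoverLeaseFSUtils", Level.ERROR);
  }
}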
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:27,812 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
[24 further identical WARN [Close-WAL-Writer-0] util.RecoverLeaseFSUtils(258) "Failed invocation" entries, each with the same java.lang.reflect.InvocationTargetException stack trace and the same "Caused by: java.io.IOException: Filesystem closed" root cause, repeat from 2024-12-03T13:10:28,217 through 2024-12-03T13:10:35,817 at roughly one-second intervals per file (eight per WAL), cycling over the three WALs under hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/: WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409, WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta, and MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886. Duplicate entries elided.]
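[Editor's note: the shape of these warnings, "java.lang.reflect.InvocationTargetException: null" with the real failure only in the "Caused by:" chain, follows from the RecoverLeaseFSUtils.isFileClosed frames probing DistributedFileSystem.isFileClosed through reflection, as the Method.invoke frames above show. Any IOException the probed method throws, here "Filesystem closed" from a DFSClient shut down during cluster teardown, gets wrapped by the reflection machinery, whose wrapper exception carries no message of its own. A minimal, self-contained Java sketch of that wrapping; the ClosedFileSystem class and its method are hypothetical stand-ins, not HBase or HDFS code:

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    public class ReflectiveProbeDemo {

        /** Hypothetical stand-in for a filesystem whose client has been shut down. */
        public static class ClosedFileSystem {
            public boolean isFileClosed(String path) throws IOException {
                // Mirrors what DFSClient.checkOpen() does once the client is closed.
                throw new IOException("Filesystem closed");
            }
        }

        public static void main(String[] args) throws Exception {
            // Look the method up reflectively, as the RecoverLeaseFSUtils frames do.
            Method probe = ClosedFileSystem.class.getMethod("isFileClosed", String.class);
            try {
                probe.invoke(new ClosedFileSystem(), "/some/wal");
            } catch (InvocationTargetException e) {
                // Matches the log line "InvocationTargetException: null":
                // the wrapper's own message is null...
                System.out.println("wrapper message: " + e.getMessage());
                // ...and the real failure appears only as the cause.
                System.out.println("cause: " + e.getCause());
            }
        }
    }

Running this prints a null wrapper message followed by "java.io.IOException: Filesystem closed", the exact pattern repeated in the entries above.]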
2024-12-03T13:10:35,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41619 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12
2024-12-03T13:10:35,826 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: hbase:namespace, procId: 12 completed
2024-12-03T13:10:35,835 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41619 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-03T13:10:35,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41619 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-03T13:10:35,837 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-03T13:10:35,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41619 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14
2024-12-03T13:10:35,838 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-03T13:10:35,838 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-03T13:10:35,994 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 35946657df09,40569,1733231411853
2024-12-03T13:10:35,995 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40569 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15
2024-12-03T13:10:35,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7.
2024-12-03T13:10:35,996 INFO [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing bc6ae92ebd989e3ee03882838574c1f7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-03T13:10:36,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/.tmp/info/28dd1b95624e4e20be88969160e2bf88 is 1080, key is row0001/info:/1733231435830/Put/seqid=0
2024-12-03T13:10:36,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42247 is added to blk_1073741840_1016 (size=6033)
2024-12-03T13:10:36,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38179 is added to blk_1073741840_1016 (size=6033)
2024-12-03T13:10:36,075 INFO [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/.tmp/info/28dd1b95624e4e20be88969160e2bf88
2024-12-03T13:10:36,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/.tmp/info/28dd1b95624e4e20be88969160e2bf88 as hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/info/28dd1b95624e4e20be88969160e2bf88
2024-12-03T13:10:36,121 INFO [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/info/28dd1b95624e4e20be88969160e2bf88, entries=1, sequenceid=5, filesize=5.9 K
2024-12-03T13:10:36,123 INFO [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for bc6ae92ebd989e3ee03882838574c1f7 in 127ms, sequenceid=5, compaction requested=false
2024-12-03T13:10:36,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for bc6ae92ebd989e3ee03882838574c1f7:
2024-12-03T13:10:36,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7.
2024-12-03T13:10:36,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15
2024-12-03T13:10:36,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41619 {}] master.HMaster(4106): Remote procedure done, pid=15
2024-12-03T13:10:36,148 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14
2024-12-03T13:10:36,148 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 293 msec
2024-12-03T13:10:36,150 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 313 msec
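[Editor's note: the client side of the pid=14 exchange above is a single synchronous Admin call; the HBaseAdmin$TableFuture entry earlier shows the same pattern completing for procId 12. A minimal sketch of how a test might issue this flush, assuming the hbase-client dependency is on the classpath; the connection configuration is illustrative and not taken from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Synchronous: returns once the master-side FlushTableProcedure
                // (pid=14 in this log) finishes. In this run the master fanned it
                // out to one FlushRegionProcedure subprocedure (pid=15).
                admin.flush(TableName.valueOf(
                    "TestLogRolling-testCompactionRecordDoesntBlockRolling"));
            }
        }
    }

The 313 msec reported for pid=14 above is the wall time the caller waits on this flush.]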
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:36,818 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:37,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:37,784 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:37,819 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:10:38,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:38,785 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:38,820 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:39,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:39,785 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:39,820 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:40,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:40,786 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:10:40,821 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:41,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:41,347 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-03T13:10:41,787 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:10:41,822 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:42,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:42,787 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:42,822 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:43,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:43,788 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:43,823 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:44,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-03T13:10:44,789 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
[identical WARN entries and stack traces collapsed: 13:10:44,824 and 13:10:45,824 for WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409; 13:10:45,229 for 35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta; 13:10:45,789 for the MasterData WAL above]
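Reading the trace: FSHLog's Close-WAL-Writer-0 thread is retrying lease recovery on WALs under hdfs://localhost:42201, while the active test below writes to hdfs://localhost:36523, so these retries apparently target an earlier mini-cluster whose DFSClient has already been closed. Each retry probes DistributedFileSystem.isFileClosed(Path) through reflection, and the probe fails with InvocationTargetException wrapping "Filesystem closed". A minimal sketch of that reflective-probe pattern follows; it is an illustration with a hypothetical class name (IsFileClosedProbe), not HBase's actual RecoverLeaseFSUtils code.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class IsFileClosedProbe {
  // Probes DistributedFileSystem.isFileClosed(Path) reflectively, as the
  // stack trace above shows, so the same code can run against Hadoop
  // versions that predate the method.
  public static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      return false; // old Hadoop: caller falls back to recoverLease polling
    } catch (IllegalAccessException | InvocationTargetException e) {
      // InvocationTargetException wraps the real failure; in the log above
      // the cause is java.io.IOException: Filesystem closed, so the probe
      // fails on every retry and the WARN is re-logged about once a second.
      return false;
    }
  }
}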
2024-12-03T13:10:45,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41619 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14
2024-12-03T13:10:45,839 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 14 completed
2024-12-03T13:10:45,843 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41619 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-03T13:10:45,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41619 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-03T13:10:45,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41619 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16
2024-12-03T13:10:45,845 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-03T13:10:45,846 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-03T13:10:45,846 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-03T13:10:45,997 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 35946657df09,40569,1733231411853
2024-12-03T13:10:45,998 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40569 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17
2024-12-03T13:10:45,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7.
2024-12-03T13:10:45,999 INFO [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing bc6ae92ebd989e3ee03882838574c1f7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-03T13:10:46,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/.tmp/info/7fa12c62497f4b33936e8a7d1bea3a50 is 1080, key is row0002/info:/1733231445839/Put/seqid=0
2024-12-03T13:10:46,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38179 is added to blk_1073741841_1017 (size=6033)
2024-12-03T13:10:46,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42247 is added to blk_1073741841_1017 (size=6033)
2024-12-03T13:10:46,015 INFO [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/.tmp/info/7fa12c62497f4b33936e8a7d1bea3a50
2024-12-03T13:10:46,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/.tmp/info/7fa12c62497f4b33936e8a7d1bea3a50 as hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/info/7fa12c62497f4b33936e8a7d1bea3a50
2024-12-03T13:10:46,040 INFO [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/info/7fa12c62497f4b33936e8a7d1bea3a50, entries=1, sequenceid=9, filesize=5.9 K
2024-12-03T13:10:46,041 INFO [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for bc6ae92ebd989e3ee03882838574c1f7 in 42ms, sequenceid=9, compaction requested=false
2024-12-03T13:10:46,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for bc6ae92ebd989e3ee03882838574c1f7:
2024-12-03T13:10:46,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7.
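The flush above follows the write-then-commit pattern visible in the paths: the new HFile is written under the region's .tmp directory and only made visible by moving it into the column-family directory, so readers never observe a half-written store file. A generic sketch of that step follows; TmpThenCommit is a hypothetical name, and HBase's HRegionFileSystem performs additional validation omitted here.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class TmpThenCommit {
  // Commit a flushed file by renaming it from .tmp into the family
  // directory; the HDFS rename is a single metadata operation, so the
  // file appears to readers all at once.
  public static Path commitStoreFile(FileSystem fs, Path tmpFile, Path familyDir) throws IOException {
    Path dst = new Path(familyDir, tmpFile.getName());
    if (!fs.rename(tmpFile, dst)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + dst);
    }
    return dst;
  }
}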
2024-12-03T13:10:46,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17
2024-12-03T13:10:46,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41619 {}] master.HMaster(4106): Remote procedure done, pid=17
2024-12-03T13:10:46,051 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16
2024-12-03T13:10:46,051 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 198 msec
2024-12-03T13:10:46,054 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 210 msec
[identical Close-WAL-Writer-0 WARN and stack trace at 13:10:46,230 for the meta WAL collapsed]
2024-12-03T13:10:46,576 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-03T13:10:46,577 INFO [RS-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40948, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService
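For reference, the client-side call that drives the FLUSH operations logged above (procId 14 and 16) is the HBase Admin flush API, which blocks until the master's FlushTableProcedure completes. A minimal sketch, assuming default client configuration and the table name from this test (FlushTableExample is a hypothetical class name):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Returns once the master reports the flush procedure complete,
      // matching the "Operation: FLUSH ... completed" line above.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}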
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:47,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:10:47,791 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:47,825 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:48,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:48,791 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:48,826 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:49,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:49,792 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:49,827 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:10:50,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:50,793 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:50,827 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:51,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:51,794 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:51,828 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:52,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:10:52,794 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
2024-12-03T13:10:52,828 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
	...
Caused by: java.io.IOException: Filesystem closed
	...
2024-12-03T13:10:53,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
	...
Caused by: java.io.IOException: Filesystem closed
	...
2024-12-03T13:10:53,795 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
	...
Caused by: java.io.IOException: Filesystem closed
	...
2024-12-03T13:10:53,829 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
	...
Caused by: java.io.IOException: Filesystem closed
	...
2024-12-03T13:10:54,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
	...
Caused by: java.io.IOException: Filesystem closed
	...
2024-12-03T13:10:54,796 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
	...
Caused by: java.io.IOException: Filesystem closed
	...
2024-12-03T13:10:54,830 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
	...
Caused by: java.io.IOException: Filesystem closed
	...
2024-12-03T13:10:55,236 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
	...
Caused by: java.io.IOException: Filesystem closed
	...
2024-12-03T13:10:55,634 INFO [master/35946657df09:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-03T13:10:55,634 INFO [master/35946657df09:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-12-03T13:10:55,796 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
	...
Caused by: java.io.IOException: Filesystem closed
	...
2024-12-03T13:10:55,831 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
	...
Caused by: java.io.IOException: Filesystem closed
	...
2024-12-03T13:10:55,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41619 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16
2024-12-03T13:10:55,847 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 16 completed
2024-12-03T13:10:55,849 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C40569%2C1733231411853.1733231455849
2024-12-03T13:10:55,892 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/WALs/35946657df09,40569,1733231411853/35946657df09%2C40569%2C1733231411853.1733231413962 with entries=13, filesize=6.41 KB; new WAL /user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/WALs/35946657df09,40569,1733231411853/35946657df09%2C40569%2C1733231411853.1733231455849
2024-12-03T13:10:55,892 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42113:42113),(127.0.0.1/127.0.0.1:33231:33231)]
2024-12-03T13:10:55,892 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/WALs/35946657df09,40569,1733231411853/35946657df09%2C40569%2C1733231411853.1733231413962 is not closed yet, will try archiving it next time
2024-12-03T13:10:55,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42247 is added to blk_1073741833_1009 (size=6574)
2024-12-03T13:10:55,897 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41619 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-03T13:10:55,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41619 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-03T13:10:55,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41619 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18
2024-12-03T13:10:55,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38179 is added to blk_1073741833_1009 (size=6574)
2024-12-03T13:10:55,903 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-03T13:10:55,903 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-03T13:10:55,904 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-03T13:10:56,055 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 35946657df09,40569,1733231411853
2024-12-03T13:10:56,056 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40569 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19
2024-12-03T13:10:56,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7.
2024-12-03T13:10:56,056 INFO [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing bc6ae92ebd989e3ee03882838574c1f7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-03T13:10:56,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/.tmp/info/cb3278d2c10a48e4b8121abdd4ce10e4 is 1080, key is row0003/info:/1733231455847/Put/seqid=0
2024-12-03T13:10:56,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42247 is added to blk_1073741843_1019 (size=6033)
2024-12-03T13:10:56,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38179 is added to blk_1073741843_1019 (size=6033)
2024-12-03T13:10:56,068 INFO [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/.tmp/info/cb3278d2c10a48e4b8121abdd4ce10e4
2024-12-03T13:10:56,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/.tmp/info/cb3278d2c10a48e4b8121abdd4ce10e4 as hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/info/cb3278d2c10a48e4b8121abdd4ce10e4
2024-12-03T13:10:56,083 INFO [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/info/cb3278d2c10a48e4b8121abdd4ce10e4, entries=1, sequenceid=13, filesize=5.9 K
2024-12-03T13:10:56,084 INFO [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for bc6ae92ebd989e3ee03882838574c1f7 in 27ms, sequenceid=13, compaction requested=true
2024-12-03T13:10:56,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for bc6ae92ebd989e3ee03882838574c1f7:
2024-12-03T13:10:56,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7.
2024-12-03T13:10:56,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19
2024-12-03T13:10:56,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41619 {}] master.HMaster(4106): Remote procedure done, pid=19
2024-12-03T13:10:56,094 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18
2024-12-03T13:10:56,094 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 181 msec
2024-12-03T13:10:56,099 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 197 msec
2024-12-03T13:10:56,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
	...
Caused by: java.io.IOException: Filesystem closed
	...
2024-12-03T13:10:56,797 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
	...
Caused by: java.io.IOException: Filesystem closed
	...
2024-12-03T13:10:56,831 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
	...
Caused by: java.io.IOException: Filesystem closed
	...
2024-12-03T13:10:57,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
	...
Caused by: java.io.IOException: Filesystem closed
	...
2024-12-03T13:10:57,797 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
	...
Caused by: java.io.IOException: Filesystem closed
	...
2024-12-03T13:10:57,832 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
	...
Caused by: java.io.IOException: Filesystem closed
	...
2024-12-03T13:10:58,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
	...
Caused by: java.io.IOException: Filesystem closed
	...
2024-12-03T13:10:58,798 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
	...
Caused by: java.io.IOException: Filesystem closed
	...
2024-12-03T13:10:58,833 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
	...
Caused by: java.io.IOException: Filesystem closed
	...
2024-12-03T13:10:59,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
	...
Caused by: java.io.IOException: Filesystem closed
	...
2024-12-03T13:10:59,798 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
	...
Caused by: java.io.IOException: Filesystem closed
	...
2024-12-03T13:10:59,833 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
	...
Caused by: java.io.IOException: Filesystem closed
	...
2024-12-03T13:11:00,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
	...
Caused by: java.io.IOException: Filesystem closed
	...
2024-12-03T13:11:00,319 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region d9f2c5721f7a1570faf5a2226601b62a, had cached 0 bytes from a total of 5037
2024-12-03T13:11:00,801 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
	...
Caused by: java.io.IOException: Filesystem closed
	...
2024-12-03T13:11:00,834 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
	...
Caused by: java.io.IOException: Filesystem closed
	...
2024-12-03T13:11:01,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
	...
Caused by: java.io.IOException: Filesystem closed
	...
2024-12-03T13:11:01,279 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region bc6ae92ebd989e3ee03882838574c1f7, had cached 0 bytes from a total of 18099
2024-12-03T13:11:01,801 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
	...
Caused by: java.io.IOException: Filesystem closed
	...
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:01,835 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:11:02,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:02,802 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:02,835 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:03,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:03,438 DEBUG [master/35946657df09:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=1, created chunk count=14, reused chunk count=26, reuseRatio=65.00% 2024-12-03T13:11:03,447 DEBUG [master/35946657df09:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-03T13:11:03,803 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:03,836 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:11:04,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:04,804 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:04,837 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:05,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:05,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:05,837 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
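The WARN records above are successive retries of one probe: RecoverLeaseFSUtils.isFileClosed reaches DistributedFileSystem.isFileClosed through reflection while the test cluster's DFSClient has already been shut down, so every attempt fails with InvocationTargetException wrapping IOException("Filesystem closed"). The identical stack trace is shown once, at 13:11:00,239; the Close-WAL-Writer-0 thread then re-probes roughly once per second per WAL file. A minimal sketch of that reflective probe pattern follows (an illustration with an invented class name, not the HBase source):

import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class IsFileClosedProbe {
  // Returns true only when the filesystem exposes isFileClosed(Path) and it
  // reports the file as closed. Any reflective failure, such as an
  // InvocationTargetException wrapping IOException("Filesystem closed")
  // after the DFSClient is shut down, is treated as "not known to be closed".
  static boolean isFileClosed(FileSystem fs, Path p) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, p);
    } catch (ReflectiveOperationException e) {
      // Roughly the branch behind the WARN at RecoverLeaseFSUtils(258):
      // the caller logs the failure and schedules another probe.
      return false;
    }
  }
}

Treating any reflective failure as "not yet closed" is what keeps the recovery loop polling, which is exactly the once-per-second repetition visible above.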
2024-12-03T13:11:05,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41619 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18
2024-12-03T13:11:05,900 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 18 completed
2024-12-03T13:11:05,900 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-03T13:11:05,902 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-03T13:11:05,902 DEBUG [Time-limited test {}] regionserver.HStore(1540): bc6ae92ebd989e3ee03882838574c1f7/info is initiating minor compaction (all files)
2024-12-03T13:11:05,902 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-03T13:11:05,902 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-03T13:11:05,902 INFO [Time-limited test {}] regionserver.HRegion(2351): Starting compaction of bc6ae92ebd989e3ee03882838574c1f7/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7.
2024-12-03T13:11:05,902 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/info/28dd1b95624e4e20be88969160e2bf88, hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/info/7fa12c62497f4b33936e8a7d1bea3a50, hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/info/cb3278d2c10a48e4b8121abdd4ce10e4] into tmpdir=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/.tmp, totalSize=17.7 K
2024-12-03T13:11:05,903 DEBUG [Time-limited test {}] compactions.Compactor(224): Compacting 28dd1b95624e4e20be88969160e2bf88, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733231435830
2024-12-03T13:11:05,903 DEBUG [Time-limited test {}] compactions.Compactor(224): Compacting 7fa12c62497f4b33936e8a7d1bea3a50, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1733231445839
2024-12-03T13:11:05,904 DEBUG [Time-limited test {}] compactions.Compactor(224): Compacting cb3278d2c10a48e4b8121abdd4ce10e4, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733231455847
2024-12-03T13:11:05,914 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): bc6ae92ebd989e3ee03882838574c1f7#info#compaction#28 average throughput is 3.08 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-03T13:11:05,914 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/.tmp/info/e6b7a9fcbd374e90a5b7c41a8f4d5b93 is 1080, key is row0001/info:/1733231435830/Put/seqid=0
2024-12-03T13:11:05,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42247 is added to blk_1073741844_1020 (size=8296)
2024-12-03T13:11:05,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38179 is added to blk_1073741844_1020 (size=8296)
2024-12-03T13:11:05,926 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/.tmp/info/e6b7a9fcbd374e90a5b7c41a8f4d5b93 as hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/info/e6b7a9fcbd374e90a5b7c41a8f4d5b93
2024-12-03T13:11:05,932 INFO [Time-limited test {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bc6ae92ebd989e3ee03882838574c1f7/info of bc6ae92ebd989e3ee03882838574c1f7 into e6b7a9fcbd374e90a5b7c41a8f4d5b93(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-03T13:11:05,932 DEBUG [Time-limited test {}] regionserver.HRegion(2381): Compaction status journal for bc6ae92ebd989e3ee03882838574c1f7:
2024-12-03T13:11:05,935 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C40569%2C1733231411853.1733231465934
2024-12-03T13:11:05,961 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/WALs/35946657df09,40569,1733231411853/35946657df09%2C40569%2C1733231411853.1733231455849 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/WALs/35946657df09,40569,1733231411853/35946657df09%2C40569%2C1733231411853.1733231465934
2024-12-03T13:11:05,961 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42113:42113),(127.0.0.1/127.0.0.1:33231:33231)]
2024-12-03T13:11:05,961 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/WALs/35946657df09,40569,1733231411853/35946657df09%2C40569%2C1733231411853.1733231455849 is not closed yet, will try archiving it next time
2024-12-03T13:11:05,962 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/WALs/35946657df09,40569,1733231411853/35946657df09%2C40569%2C1733231411853.1733231413962 to hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/oldWALs/35946657df09%2C40569%2C1733231411853.1733231413962
2024-12-03T13:11:05,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38179 is added to blk_1073741842_1018 (size=2520)
2024-12-03T13:11:05,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42247 is added to blk_1073741842_1018 (size=2520)
2024-12-03T13:11:05,967 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41619 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-03T13:11:05,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41619 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-03T13:11:05,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41619 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20
2024-12-03T13:11:05,969 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-03T13:11:05,970 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-03T13:11:05,970 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-03T13:11:06,123 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 35946657df09,40569,1733231411853
2024-12-03T13:11:06,124 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40569 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21
2024-12-03T13:11:06,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7.
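The ExploringCompactionPolicy record at 13:11:05,902 ("selected 3 files of size 18099 ... with 1 in ratio") reflects its ratio test: a candidate set qualifies when each file is at most ratio times the combined size of the other candidates. A simplified sketch of that check follows; RatioCheck is an invented name, the real policy also enforces min/max file counts and an off-peak ratio, and the 1.2 value is assumed from the stock hbase.hstore.compaction.ratio default:

final class RatioCheck {
  // A candidate set is "in ratio" when every file's size is at most
  // ratio * (sum of the other files' sizes).
  static boolean filesInRatio(long[] sizes, double ratio) {
    long total = 0;
    for (long s : sizes) total += s;            // totalSize of the candidates
    for (long s : sizes) {
      if (s > ratio * (total - s)) return false;
    }
    return true;
  }
  public static void main(String[] args) {
    long[] sizes = {6033, 6033, 6033};          // three ~5.9 K HFiles, total 18099
    System.out.println(filesInRatio(sizes, 1.2)); // true: 6033 <= 1.2 * 12066
  }
}

For the selection above, each ~5.9 K file is well under 1.2 times the other two combined, so the whole set is "in ratio" and is compacted as one minor (all files) compaction.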
2024-12-03T13:11:06,124 INFO [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing bc6ae92ebd989e3ee03882838574c1f7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-03T13:11:06,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/.tmp/info/143e697f190b4314b2467b21dae1e868 is 1080, key is row0000/info:/1733231465933/Put/seqid=0
2024-12-03T13:11:06,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38179 is added to blk_1073741846_1022 (size=6033)
2024-12-03T13:11:06,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42247 is added to blk_1073741846_1022 (size=6033)
2024-12-03T13:11:06,137 INFO [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/.tmp/info/143e697f190b4314b2467b21dae1e868
2024-12-03T13:11:06,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/.tmp/info/143e697f190b4314b2467b21dae1e868 as hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/info/143e697f190b4314b2467b21dae1e868
2024-12-03T13:11:06,153 INFO [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/info/143e697f190b4314b2467b21dae1e868, entries=1, sequenceid=18, filesize=5.9 K
2024-12-03T13:11:06,158 INFO [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for bc6ae92ebd989e3ee03882838574c1f7 in 34ms, sequenceid=18, compaction requested=false
2024-12-03T13:11:06,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for bc6ae92ebd989e3ee03882838574c1f7:
2024-12-03T13:11:06,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7.
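The pid=20/pid=21 records above and below trace one FlushTableProcedure round trip: prepare on the master, dispatch of a FlushRegionCallable to the region server, memstore flush into a .tmp HFile, and commit at sequenceid=18. For reference, a client starts such a flush through the Admin API; the following is a hedged sketch (FlushTableExample is an invented name, the connection setup is assumed, the table name comes from the log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table =
          TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
      // The master turns this request into a FlushTableProcedure (like pid=20
      // above) with a FlushRegionProcedure child per region (pid=21), and the
      // client waits for completion, as the HBaseAdmin$TableFuture line logs.
      admin.flush(table);
    }
  }
}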
2024-12-03T13:11:06,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/35946657df09:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-03T13:11:06,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41619 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-03T13:11:06,163 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-12-03T13:11:06,163 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 190 msec 2024-12-03T13:11:06,165 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 196 msec 2024-12-03T13:11:06,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:06,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:06,838 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:11:07,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:11:07,806 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:11:07,838 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
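The recurring WARN above originates in RecoverLeaseFSUtils, which invokes DistributedFileSystem.isFileClosed reflectively; when the underlying DFSClient has already been closed, the resulting IOException("Filesystem closed") comes back wrapped in an InvocationTargetException whose own message is null, which is why the header line reads "InvocationTargetException: null". A minimal sketch of that wrapping behaviour, using a stand-in class rather than the real HDFS API:

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    public class ReflectiveWrapDemo {
        // Stand-in for DistributedFileSystem: isFileClosed fails because the client is closed.
        public static class FakeFs {
            public boolean isFileClosed(String path) throws IOException {
                throw new IOException("Filesystem closed");
            }
        }

        public static void main(String[] args) throws Exception {
            Method m = FakeFs.class.getMethod("isFileClosed", String.class);
            try {
                m.invoke(new FakeFs(), "/user/jenkins/some-wal");
            } catch (InvocationTargetException e) {
                // Prints "null": the wrapper itself carries no message.
                System.out.println("wrapper message: " + e.getMessage());
                // The real failure travels as the cause.
                System.out.println("cause: " + e.getCause());
            }
        }
    }

Unwrapping with getCause() recovers the real failure, which is exactly what the "Caused by:" block in the trace shows.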
2024-12-03T13:11:11,347 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-03T13:11:11,596 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
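The HBASE-27595 DEBUG entry above records the test utility's FsDatasetAsyncDiskServiceFixer failing to find a private "threadGroup" field it expects inside Hadoop's datanode internals, a field that newer Hadoop releases no longer have. A hedged illustration of that failure mode, with hypothetical stand-in classes instead of the real Hadoop types:

    import java.lang.reflect.Field;

    public class ThreadGroupProbe {
        // Hypothetical stand-ins: an "old" class with the private field, a "new" one without it.
        static class OldAsyncDiskService { @SuppressWarnings("unused") private ThreadGroup threadGroup; }
        static class NewAsyncDiskService { }

        static void probe(Class<?> c) {
            try {
                Field f = c.getDeclaredField("threadGroup");
                f.setAccessible(true);
                System.out.println(c.getSimpleName() + ": field found");
            } catch (NoSuchFieldException e) {
                // The condition logged above; the fixer notes it and carries on.
                System.out.println(c.getSimpleName() + ": NoSuchFieldException: " + e.getMessage());
            }
        }

        public static void main(String[] args) {
            probe(OldAsyncDiskService.class);   // field found
            probe(NewAsyncDiskService.class);   // NoSuchFieldException: threadGroup
        }
    }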
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:11:11,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:11:11,629 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:11:11,809 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:11,841 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:12,137 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T13:11:12,139 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:11:12,139 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:11:12,139 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:11:12,139 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:11:12,152 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:11:12,152 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:11:12,152 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:11:12,152 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:11:12,152 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:11:12,153 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:11:12,155 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:11:12,155 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:11:12,155 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:11:12,157 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:11:12,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:12,810 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:12,842 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:13,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:13,810 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:13,842 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:11:14,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:14,781 DEBUG [master/35946657df09:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region d9f2c5721f7a1570faf5a2226601b62a changed from -1.0 to 0.0, refreshing cache 2024-12-03T13:11:14,811 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:14,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:11:15,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:15,812 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:11:15,844 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:11:15,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41619 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20
2024-12-03T13:11:15,971 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 20 completed
2024-12-03T13:11:15,973 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C40569%2C1733231411853.1733231475973
2024-12-03T13:11:15,981 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/WALs/35946657df09,40569,1733231411853/35946657df09%2C40569%2C1733231411853.1733231465934 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/WALs/35946657df09,40569,1733231411853/35946657df09%2C40569%2C1733231411853.1733231475973
2024-12-03T13:11:15,981 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42113:42113),(127.0.0.1/127.0.0.1:33231:33231)]
2024-12-03T13:11:15,981 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/WALs/35946657df09,40569,1733231411853/35946657df09%2C40569%2C1733231411853.1733231465934 is not closed yet, will try archiving it next time
2024-12-03T13:11:15,981 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/WALs/35946657df09,40569,1733231411853/35946657df09%2C40569%2C1733231411853.1733231455849 to hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/oldWALs/35946657df09%2C40569%2C1733231411853.1733231455849
2024-12-03T13:11:15,981 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster
2024-12-03T13:11:15,981 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-03T13:11:15,982 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6954a985 to 127.0.0.1:63466
2024-12-03T13:11:15,982 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-03T13:11:15,982 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-12-03T13:11:15,982 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1276875799, stopped=false
2024-12-03T13:11:15,982 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=35946657df09,41619,1733231411489
2024-12-03T13:11:15,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42247 is added to blk_1073741845_1021 (size=2026)
2024-12-03T13:11:15,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38179 is added to blk_1073741845_1021 (size=2026)
2024-12-03T13:11:15,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-03T13:11:15,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40569-0x1019c9a231c0001, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-03T13:11:15,992 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping
2024-12-03T13:11:15,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:11:15,992 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-03T13:11:15,992 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '35946657df09,40569,1733231411853' *****
2024-12-03T13:11:15,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40569-0x1019c9a231c0001, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:11:15,992 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested
2024-12-03T13:11:15,993 INFO [RS:0;35946657df09:40569 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-03T13:11:15,993 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting
2024-12-03T13:11:15,993 INFO [RS:0;35946657df09:40569 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-03T13:11:15,993 INFO [RS:0;35946657df09:40569 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-03T13:11:15,993 INFO [RS:0;35946657df09:40569 {}] regionserver.HRegionServer(3579): Received CLOSE for bc6ae92ebd989e3ee03882838574c1f7
2024-12-03T13:11:15,994 INFO [RS:0;35946657df09:40569 {}] regionserver.HRegionServer(3579): Received CLOSE for d9f2c5721f7a1570faf5a2226601b62a
2024-12-03T13:11:15,995 INFO [RS:0;35946657df09:40569 {}] regionserver.HRegionServer(1224): stopping server 35946657df09,40569,1733231411853
2024-12-03T13:11:15,995 DEBUG [RS:0;35946657df09:40569 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-03T13:11:15,995 INFO [RS:0;35946657df09:40569 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-03T13:11:15,995 INFO [RS:0;35946657df09:40569 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-03T13:11:15,995 INFO [RS:0;35946657df09:40569 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-03T13:11:15,995 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40569-0x1019c9a231c0001, quorum=127.0.0.1:63466, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-03T13:11:15,995 INFO [RS:0;35946657df09:40569 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740
2024-12-03T13:11:15,995 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-03T13:11:15,998 INFO [RS:0;35946657df09:40569 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close
2024-12-03T13:11:15,999 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing bc6ae92ebd989e3ee03882838574c1f7, disabling compactions & flushes
2024-12-03T13:11:15,999 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-03T13:11:15,999 DEBUG [RS:0;35946657df09:40569 {}] regionserver.HRegionServer(1603): Online Regions={bc6ae92ebd989e3ee03882838574c1f7=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7., d9f2c5721f7a1570faf5a2226601b62a=hbase:namespace,,1733231414780.d9f2c5721f7a1570faf5a2226601b62a., 1588230740=hbase:meta,,1.1588230740}
2024-12-03T13:11:15,999 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7.
2024-12-03T13:11:15,999 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-03T13:11:15,999 DEBUG [RS:0;35946657df09:40569 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, bc6ae92ebd989e3ee03882838574c1f7, d9f2c5721f7a1570faf5a2226601b62a
2024-12-03T13:11:15,999 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7.
2024-12-03T13:11:15,999 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-03T13:11:15,999 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7. after waiting 0 ms
2024-12-03T13:11:15,999 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-03T13:11:15,999 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-03T13:11:15,999 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7.
2024-12-03T13:11:15,999 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing bc6ae92ebd989e3ee03882838574c1f7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-03T13:11:15,999 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=3.05 KB heapSize=5.55 KB
2024-12-03T13:11:16,006 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/.tmp/info/55a292ecb01641ada360a8e41edc43a9 is 1080, key is row0001/info:/1733231475971/Put/seqid=0
2024-12-03T13:11:16,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38179 is added to blk_1073741848_1024 (size=6033)
2024-12-03T13:11:16,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42247 is added to blk_1073741848_1024 (size=6033)
2024-12-03T13:11:16,013 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/.tmp/info/55a292ecb01641ada360a8e41edc43a9
2024-12-03T13:11:16,023 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/hbase/meta/1588230740/.tmp/info/573fecdeae7345fb96d2f136b8da9b89 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7./info:regioninfo/1733231416420/Put/seqid=0
2024-12-03T13:11:16,029 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/.tmp/info/55a292ecb01641ada360a8e41edc43a9 as hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/info/55a292ecb01641ada360a8e41edc43a9
2024-12-03T13:11:16,045 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/info/55a292ecb01641ada360a8e41edc43a9, entries=1, sequenceid=22, filesize=5.9 K
2024-12-03T13:11:16,051 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for bc6ae92ebd989e3ee03882838574c1f7 in 52ms, sequenceid=22, compaction requested=true
2024-12-03T13:11:16,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38179 is added to blk_1073741849_1025 (size=8430)
2024-12-03T13:11:16,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42247 is added to blk_1073741849_1025 (size=8430)
2024-12-03T13:11:16,060 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.79 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/hbase/meta/1588230740/.tmp/info/573fecdeae7345fb96d2f136b8da9b89
2024-12-03T13:11:16,061 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/info/28dd1b95624e4e20be88969160e2bf88, hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/info/7fa12c62497f4b33936e8a7d1bea3a50, hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/info/cb3278d2c10a48e4b8121abdd4ce10e4] to archive
2024-12-03T13:11:16,062 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7.-1 {}] backup.HFileArchiver(360): Archiving compacted files.
2024-12-03T13:11:16,065 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/info/28dd1b95624e4e20be88969160e2bf88 to hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/info/28dd1b95624e4e20be88969160e2bf88
2024-12-03T13:11:16,067 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/info/7fa12c62497f4b33936e8a7d1bea3a50 to hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/info/7fa12c62497f4b33936e8a7d1bea3a50
2024-12-03T13:11:16,070 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/info/cb3278d2c10a48e4b8121abdd4ce10e4 to hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/info/cb3278d2c10a48e4b8121abdd4ce10e4
2024-12-03T13:11:16,088 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/hbase/meta/1588230740/.tmp/table/41632ce3b919455d85436309d1409a22 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1733231416453/Put/seqid=0
2024-12-03T13:11:16,091 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/bc6ae92ebd989e3ee03882838574c1f7/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1
2024-12-03T13:11:16,093 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7.
2024-12-03T13:11:16,093 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for bc6ae92ebd989e3ee03882838574c1f7:
2024-12-03T13:11:16,093 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733231415756.bc6ae92ebd989e3ee03882838574c1f7.
2024-12-03T13:11:16,093 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing d9f2c5721f7a1570faf5a2226601b62a, disabling compactions & flushes
2024-12-03T13:11:16,093 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733231414780.d9f2c5721f7a1570faf5a2226601b62a.
2024-12-03T13:11:16,093 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733231414780.d9f2c5721f7a1570faf5a2226601b62a.
2024-12-03T13:11:16,093 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733231414780.d9f2c5721f7a1570faf5a2226601b62a. after waiting 0 ms
2024-12-03T13:11:16,093 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733231414780.d9f2c5721f7a1570faf5a2226601b62a.
2024-12-03T13:11:16,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42247 is added to blk_1073741850_1026 (size=5532)
2024-12-03T13:11:16,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38179 is added to blk_1073741850_1026 (size=5532)
2024-12-03T13:11:16,146 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=264 B at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/hbase/meta/1588230740/.tmp/table/41632ce3b919455d85436309d1409a22
2024-12-03T13:11:16,156 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/hbase/namespace/d9f2c5721f7a1570faf5a2226601b62a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-03T13:11:16,157 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733231414780.d9f2c5721f7a1570faf5a2226601b62a.
2024-12-03T13:11:16,157 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for d9f2c5721f7a1570faf5a2226601b62a:
2024-12-03T13:11:16,157 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733231414780.d9f2c5721f7a1570faf5a2226601b62a.
2024-12-03T13:11:16,177 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/hbase/meta/1588230740/.tmp/info/573fecdeae7345fb96d2f136b8da9b89 as hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/hbase/meta/1588230740/info/573fecdeae7345fb96d2f136b8da9b89
2024-12-03T13:11:16,189 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/hbase/meta/1588230740/info/573fecdeae7345fb96d2f136b8da9b89, entries=20, sequenceid=14, filesize=8.2 K
2024-12-03T13:11:16,195 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/hbase/meta/1588230740/.tmp/table/41632ce3b919455d85436309d1409a22 as hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/hbase/meta/1588230740/table/41632ce3b919455d85436309d1409a22
2024-12-03T13:11:16,199 DEBUG [RS:0;35946657df09:40569 {}] regionserver.HRegionServer(1629): Waiting on 1588230740
2024-12-03T13:11:16,205 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/hbase/meta/1588230740/table/41632ce3b919455d85436309d1409a22, entries=4, sequenceid=14, filesize=5.4 K
2024-12-03T13:11:16,207 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~3.05 KB/3122, heapSize ~5.27 KB/5400, currentSize=0 B/0 for 1588230740 in 208ms, sequenceid=14, compaction requested=false
2024-12-03T13:11:16,243 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/data/hbase/meta/1588230740/recovered.edits/17.seqid, newMaxSeqId=17, maxSeqId=1
2024-12-03T13:11:16,245 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-03T13:11:16,245 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-03T13:11:16,245 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-03T13:11:16,245 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-03T13:11:16,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:11:16,385 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/WALs/35946657df09,40569,1733231411853/35946657df09%2C40569%2C1733231411853.1733231465934 to hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/oldWALs/35946657df09%2C40569%2C1733231411853.1733231465934
2024-12-03T13:11:16,399 INFO [RS:0;35946657df09:40569 {}] regionserver.HRegionServer(1250): stopping server 35946657df09,40569,1733231411853; all regions closed.
2024-12-03T13:11:16,400 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/WALs/35946657df09,40569,1733231411853
2024-12-03T13:11:16,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42247 is added to blk_1073741834_1010 (size=4570)
2024-12-03T13:11:16,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38179 is added to blk_1073741834_1010 (size=4570)
2024-12-03T13:11:16,404 DEBUG [RS:0;35946657df09:40569 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/oldWALs
2024-12-03T13:11:16,404 INFO [RS:0;35946657df09:40569 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 35946657df09%2C40569%2C1733231411853.meta:.meta(num 1733231414446)
2024-12-03T13:11:16,404 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/WALs/35946657df09,40569,1733231411853
2024-12-03T13:11:16,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38179 is added to blk_1073741847_1023 (size=1545)
2024-12-03T13:11:16,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42247 is added to blk_1073741847_1023 (size=1545)
2024-12-03T13:11:16,408 DEBUG [RS:0;35946657df09:40569 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/oldWALs
2024-12-03T13:11:16,408 INFO [RS:0;35946657df09:40569 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 35946657df09%2C40569%2C1733231411853:(num 1733231475973)
2024-12-03T13:11:16,408 DEBUG [RS:0;35946657df09:40569 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-03T13:11:16,408 INFO [RS:0;35946657df09:40569 {}] regionserver.LeaseManager(133): Closed leases
2024-12-03T13:11:16,409 INFO [RS:0;35946657df09:40569 {}] hbase.ChoreService(370): Chore service for: regionserver/35946657df09:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown
2024-12-03T13:11:16,409 INFO [regionserver/35946657df09:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-03T13:11:16,409 INFO [RS:0;35946657df09:40569 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:40569
2024-12-03T13:11:16,450 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40569-0x1019c9a231c0001, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/35946657df09,40569,1733231411853
2024-12-03T13:11:16,450 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-03T13:11:16,458 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [35946657df09,40569,1733231411853]
2024-12-03T13:11:16,459 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 35946657df09,40569,1733231411853; numProcessing=1
2024-12-03T13:11:16,466 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/35946657df09,40569,1733231411853 already deleted, retry=false
2024-12-03T13:11:16,467 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 35946657df09,40569,1733231411853 expired; onlineServers=0
2024-12-03T13:11:16,467 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '35946657df09,41619,1733231411489' *****
2024-12-03T13:11:16,467 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-03T13:11:16,467 DEBUG [M:0;35946657df09:41619 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68b1702f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=35946657df09/172.17.0.2:0
2024-12-03T13:11:16,467 INFO [M:0;35946657df09:41619 {}] regionserver.HRegionServer(1224): stopping server 35946657df09,41619,1733231411489
2024-12-03T13:11:16,467 INFO [M:0;35946657df09:41619 {}] regionserver.HRegionServer(1250): stopping server 35946657df09,41619,1733231411489; all regions closed.
2024-12-03T13:11:16,467 DEBUG [M:0;35946657df09:41619 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-03T13:11:16,467 DEBUG [M:0;35946657df09:41619 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-03T13:11:16,467 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-03T13:11:16,467 DEBUG [M:0;35946657df09:41619 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-03T13:11:16,467 DEBUG [master/35946657df09:0:becomeActiveMaster-HFileCleaner.small.0-1733231413450 {}] cleaner.HFileCleaner(306): Exit Thread[master/35946657df09:0:becomeActiveMaster-HFileCleaner.small.0-1733231413450,5,FailOnTimeoutGroup]
2024-12-03T13:11:16,467 DEBUG [master/35946657df09:0:becomeActiveMaster-HFileCleaner.large.0-1733231413442 {}] cleaner.HFileCleaner(306): Exit Thread[master/35946657df09:0:becomeActiveMaster-HFileCleaner.large.0-1733231413442,5,FailOnTimeoutGroup]
2024-12-03T13:11:16,468 INFO [M:0;35946657df09:41619 {}] hbase.ChoreService(370): Chore service for: master/35946657df09:0 had [] on shutdown
2024-12-03T13:11:16,468 DEBUG [M:0;35946657df09:41619 {}] master.HMaster(1733): Stopping service threads
2024-12-03T13:11:16,468 INFO [M:0;35946657df09:41619 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-03T13:11:16,468 INFO [M:0;35946657df09:41619 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-03T13:11:16,468 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-03T13:11:16,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-03T13:11:16,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:11:16,475 DEBUG [M:0;35946657df09:41619 {}] zookeeper.ZKUtil(347): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-03T13:11:16,475 WARN [M:0;35946657df09:41619 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-03T13:11:16,475 INFO [M:0;35946657df09:41619 {}] assignment.AssignmentManager(391): Stopping assignment manager
2024-12-03T13:11:16,475 INFO [M:0;35946657df09:41619 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-03T13:11:16,475 DEBUG [M:0;35946657df09:41619 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-03T13:11:16,475 INFO [M:0;35946657df09:41619 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T13:11:16,475 DEBUG [M:0;35946657df09:41619 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T13:11:16,475 DEBUG [M:0;35946657df09:41619 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-03T13:11:16,475 DEBUG [M:0;35946657df09:41619 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T13:11:16,476 INFO [M:0;35946657df09:41619 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=65.09 KB heapSize=81.71 KB
2024-12-03T13:11:16,476 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-03T13:11:16,494 DEBUG [M:0;35946657df09:41619 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c229e2b15c0e40bb8124cc0774d15a07 is 82, key is hbase:meta,,1/info:regioninfo/1733231414651/Put/seqid=0
2024-12-03T13:11:16,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42247 is added to blk_1073741851_1027 (size=5672)
2024-12-03T13:11:16,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38179 is added to blk_1073741851_1027 (size=5672)
2024-12-03T13:11:16,505 INFO [M:0;35946657df09:41619 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c229e2b15c0e40bb8124cc0774d15a07
2024-12-03T13:11:16,529 DEBUG [M:0;35946657df09:41619 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9af732b6c419400e8e901ff687ba30a7 is 799, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1733231416506/Put/seqid=0
2024-12-03T13:11:16,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42247 is added to blk_1073741852_1028 (size=8357)
2024-12-03T13:11:16,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38179 is added to blk_1073741852_1028 (size=8357)
2024-12-03T13:11:16,535 INFO [M:0;35946657df09:41619 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.48 KB at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9af732b6c419400e8e901ff687ba30a7
2024-12-03T13:11:16,539 INFO [M:0;35946657df09:41619 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9af732b6c419400e8e901ff687ba30a7
2024-12-03T13:11:16,551 DEBUG [M:0;35946657df09:41619 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6ba41c684ed5406b8cb5ff5cebbe9ec1 is 69, key is 35946657df09,40569,1733231411853/rs:state/1733231413607/Put/seqid=0
2024-12-03T13:11:16,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42247 is added to blk_1073741853_1029 (size=5156)
2024-12-03T13:11:16,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38179 is added to blk_1073741853_1029 (size=5156)
2024-12-03T13:11:16,556 INFO [M:0;35946657df09:41619 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6ba41c684ed5406b8cb5ff5cebbe9ec1
2024-12-03T13:11:16,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40569-0x1019c9a231c0001, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T13:11:16,558 INFO [RS:0;35946657df09:40569 {}] regionserver.HRegionServer(1307): Exiting; stopping=35946657df09,40569,1733231411853; zookeeper connection closed.
2024-12-03T13:11:16,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40569-0x1019c9a231c0001, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T13:11:16,559 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4d32a4f1 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4d32a4f1
2024-12-03T13:11:16,559 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-03T13:11:16,572 DEBUG [M:0;35946657df09:41619 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/39b804e57467432fae84842707a0d4e9 is 52, key is load_balancer_on/state:d/1733231415722/Put/seqid=0
2024-12-03T13:11:16,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42247 is added to blk_1073741854_1030 (size=5056)
2024-12-03T13:11:16,578 INFO [M:0;35946657df09:41619 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/39b804e57467432fae84842707a0d4e9
2024-12-03T13:11:16,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38179 is added to blk_1073741854_1030 (size=5056)
2024-12-03T13:11:16,583 DEBUG [M:0;35946657df09:41619 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c229e2b15c0e40bb8124cc0774d15a07 as hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c229e2b15c0e40bb8124cc0774d15a07
2024-12-03T13:11:16,588 INFO [M:0;35946657df09:41619 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c229e2b15c0e40bb8124cc0774d15a07, entries=8, sequenceid=184, filesize=5.5 K
2024-12-03T13:11:16,589 DEBUG [M:0;35946657df09:41619 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9af732b6c419400e8e901ff687ba30a7 as hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9af732b6c419400e8e901ff687ba30a7
2024-12-03T13:11:16,594 INFO [M:0;35946657df09:41619 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9af732b6c419400e8e901ff687ba30a7
2024-12-03T13:11:16,594 INFO [M:0;35946657df09:41619 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9af732b6c419400e8e901ff687ba30a7, entries=21, sequenceid=184, filesize=8.2 K
2024-12-03T13:11:16,595 DEBUG [M:0;35946657df09:41619 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6ba41c684ed5406b8cb5ff5cebbe9ec1 as hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6ba41c684ed5406b8cb5ff5cebbe9ec1
2024-12-03T13:11:16,600 INFO [M:0;35946657df09:41619 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6ba41c684ed5406b8cb5ff5cebbe9ec1, entries=1, sequenceid=184, filesize=5.0 K
2024-12-03T13:11:16,602 DEBUG [M:0;35946657df09:41619 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/39b804e57467432fae84842707a0d4e9 as hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/39b804e57467432fae84842707a0d4e9
2024-12-03T13:11:16,607 INFO [M:0;35946657df09:41619 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36523/user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/39b804e57467432fae84842707a0d4e9, entries=1, sequenceid=184, filesize=4.9 K
2024-12-03T13:11:16,608 INFO [M:0;35946657df09:41619 {}] regionserver.HRegion(3040): Finished flush of dataSize ~65.09 KB/66649, heapSize ~81.65 KB/83608, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 132ms, sequenceid=184, compaction requested=false
2024-12-03T13:11:16,609 INFO [M:0;35946657df09:41619 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T13:11:16,609 DEBUG [M:0;35946657df09:41619 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-03T13:11:16,609 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/e052dbce-122c-03fb-de3e-4698b5949819/MasterData/WALs/35946657df09,41619,1733231411489
2024-12-03T13:11:16,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42247 is added to blk_1073741830_1006 (size=79170)
2024-12-03T13:11:16,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38179 is added to blk_1073741830_1006 (size=79170)
2024-12-03T13:11:16,611 INFO [M:0;35946657df09:41619 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-12-03T13:11:16,611 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-03T13:11:16,611 INFO [M:0;35946657df09:41619 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:41619
2024-12-03T13:11:16,620 DEBUG [M:0;35946657df09:41619 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/35946657df09,41619,1733231411489 already deleted, retry=false
2024-12-03T13:11:16,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T13:11:16,729 INFO [M:0;35946657df09:41619 {}] regionserver.HRegionServer(1307): Exiting; stopping=35946657df09,41619,1733231411489; zookeeper connection closed.
2024-12-03T13:11:16,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41619-0x1019c9a231c0000, quorum=127.0.0.1:63466, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T13:11:16,763 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@46b9efff{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T13:11:16,764 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@510b4241{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-03T13:11:16,764 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-03T13:11:16,764 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@467339ba{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-03T13:11:16,764 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@336dd6d5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/hadoop.log.dir/,STOPPED}
2024-12-03T13:11:16,765 WARN [BP-2107311685-172.17.0.2-1733231409232 heartbeating to localhost/127.0.0.1:36523 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-03T13:11:16,765 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-03T13:11:16,765 WARN [BP-2107311685-172.17.0.2-1733231409232 heartbeating to localhost/127.0.0.1:36523 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2107311685-172.17.0.2-1733231409232 (Datanode Uuid 41434f0e-79f5-49a5-8139-eb7eb23899c2) service to localhost/127.0.0.1:36523
2024-12-03T13:11:16,765 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-03T13:11:16,766 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/cluster_330f5210-7045-2e63-0bf6-7a8bbaa8a263/dfs/data/data3/current/BP-2107311685-172.17.0.2-1733231409232 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T13:11:16,766 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/cluster_330f5210-7045-2e63-0bf6-7a8bbaa8a263/dfs/data/data4/current/BP-2107311685-172.17.0.2-1733231409232 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T13:11:16,766 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-03T13:11:16,768 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@336a0528{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T13:11:16,768 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5161bbfc{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-03T13:11:16,768 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-03T13:11:16,769 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5f2e43ed{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-03T13:11:16,769 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32c045d1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/hadoop.log.dir/,STOPPED}
2024-12-03T13:11:16,770 WARN [BP-2107311685-172.17.0.2-1733231409232 heartbeating to localhost/127.0.0.1:36523 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-03T13:11:16,770 WARN [BP-2107311685-172.17.0.2-1733231409232 heartbeating to localhost/127.0.0.1:36523 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2107311685-172.17.0.2-1733231409232 (Datanode Uuid 51db223e-2155-43e3-9f5e-e78f6cf13b91) service to localhost/127.0.0.1:36523
2024-12-03T13:11:16,770 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-03T13:11:16,770 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-03T13:11:16,770 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/cluster_330f5210-7045-2e63-0bf6-7a8bbaa8a263/dfs/data/data1/current/BP-2107311685-172.17.0.2-1733231409232 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T13:11:16,771 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/cluster_330f5210-7045-2e63-0bf6-7a8bbaa8a263/dfs/data/data2/current/BP-2107311685-172.17.0.2-1733231409232 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T13:11:16,771 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-03T13:11:16,777 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@411b910a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-03T13:11:16,777 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@49f18b4d{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-03T13:11:16,778 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-03T13:11:16,778 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7923c3ec{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-03T13:11:16,778 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2cc7e5f7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/hadoop.log.dir/,STOPPED}
2024-12-03T13:11:16,792 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-12-03T13:11:16,812 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:16,813 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-03T13:11:16,820 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=110 (was 98) - Thread LEAK? -, OpenFileDescriptor=466 (was 440) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1036 (was 1282), ProcessCount=11 (was 11), AvailableMemoryMB=660 (was 1622) 2024-12-03T13:11:16,826 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=110, OpenFileDescriptor=466, MaxFileDescriptor=1048576, SystemLoadAverage=1036, ProcessCount=11, AvailableMemoryMB=659 2024-12-03T13:11:16,827 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-03T13:11:16,827 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/hadoop.log.dir so I do NOT create it in target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265 2024-12-03T13:11:16,827 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4c971e2-9f8a-9e4d-4550-a28ed08254a0/hadoop.tmp.dir so I do NOT create it in target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265 2024-12-03T13:11:16,827 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/cluster_544ddc48-3306-e34b-de84-543349964af8, deleteOnExit=true 2024-12-03T13:11:16,827 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-03T13:11:16,827 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/test.cache.data in system properties and HBase conf 2024-12-03T13:11:16,827 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/hadoop.tmp.dir in system properties and HBase conf 2024-12-03T13:11:16,827 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/hadoop.log.dir in system properties and HBase conf 2024-12-03T13:11:16,827 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-03T13:11:16,827 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-03T13:11:16,827 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-03T13:11:16,827 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-03T13:11:16,828 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-03T13:11:16,828 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-03T13:11:16,828 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-03T13:11:16,828 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T13:11:16,828 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-03T13:11:16,828 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-03T13:11:16,828 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T13:11:16,828 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T13:11:16,828 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-03T13:11:16,828 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/nfs.dump.dir in system properties and HBase conf 2024-12-03T13:11:16,828 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/java.io.tmpdir in system properties and HBase conf 2024-12-03T13:11:16,829 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T13:11:16,829 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-03T13:11:16,829 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-03T13:11:16,845 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:16,845 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T13:11:17,233 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T13:11:17,236 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T13:11:17,237 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T13:11:17,237 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T13:11:17,237 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T13:11:17,238 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T13:11:17,238 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3972b007{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/hadoop.log.dir/,AVAILABLE} 2024-12-03T13:11:17,238 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1bb5a098{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T13:11:17,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:11:17,348 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@79537413{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/java.io.tmpdir/jetty-localhost-43479-hadoop-hdfs-3_4_1-tests_jar-_-any-7460645650509095366/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T13:11:17,349 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@53b67655{HTTP/1.1, (http/1.1)}{localhost:43479} 2024-12-03T13:11:17,349 INFO [Time-limited test {}] server.Server(415): Started @323114ms 2024-12-03T13:11:17,366 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T13:11:17,557 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T13:11:17,561 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T13:11:17,562 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T13:11:17,562 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T13:11:17,562 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T13:11:17,563 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49e87701{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/hadoop.log.dir/,AVAILABLE} 2024-12-03T13:11:17,563 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5b8fcc9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T13:11:17,669 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3ec3473f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/java.io.tmpdir/jetty-localhost-39199-hadoop-hdfs-3_4_1-tests_jar-_-any-3980303440088602258/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T13:11:17,670 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2db9b638{HTTP/1.1, (http/1.1)}{localhost:39199} 2024-12-03T13:11:17,670 INFO [Time-limited test {}] server.Server(415): Started @323435ms 2024-12-03T13:11:17,671 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-03T13:11:17,701 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-03T13:11:17,706 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-03T13:11:17,706 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-03T13:11:17,707 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-03T13:11:17,707 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-03T13:11:17,707 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2feb9278{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/hadoop.log.dir/,AVAILABLE}
2024-12-03T13:11:17,707 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@76d55a99{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-03T13:11:17,765 INFO [regionserver/35946657df09:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-03T13:11:17,809 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@290defc9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/java.io.tmpdir/jetty-localhost-32957-hadoop-hdfs-3_4_1-tests_jar-_-any-12971368212606230321/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T13:11:17,810 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3054128b{HTTP/1.1, (http/1.1)}{localhost:32957}
2024-12-03T13:11:17,810 INFO [Time-limited test {}] server.Server(415): Started @323575ms
2024-12-03T13:11:17,812 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-03T13:11:17,813 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:11:17,845 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:11:18,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:11:18,321 WARN [Thread-1732 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/cluster_544ddc48-3306-e34b-de84-543349964af8/dfs/data/data1/current/BP-1105107160-172.17.0.2-1733231476851/current, will proceed with Du for space computation calculation,
2024-12-03T13:11:18,322 WARN [Thread-1733 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/cluster_544ddc48-3306-e34b-de84-543349964af8/dfs/data/data2/current/BP-1105107160-172.17.0.2-1733231476851/current, will proceed with Du for space computation calculation,
2024-12-03T13:11:18,348 WARN [Thread-1697 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-03T13:11:18,351 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x53ecf5ffff404b87 with lease ID 0x4e2f0e9b0d6384a0: Processing first storage report for DS-6873e182-fd41-4c74-8c20-7e90a7ddc78d from datanode DatanodeRegistration(127.0.0.1:34307, datanodeUuid=783e0338-1325-4a89-9548-84ac34f93c76, infoPort=37299, infoSecurePort=0, ipcPort=42249, storageInfo=lv=-57;cid=testClusterID;nsid=1581459177;c=1733231476851)
2024-12-03T13:11:18,351 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x53ecf5ffff404b87 with lease ID 0x4e2f0e9b0d6384a0: from storage DS-6873e182-fd41-4c74-8c20-7e90a7ddc78d node DatanodeRegistration(127.0.0.1:34307, datanodeUuid=783e0338-1325-4a89-9548-84ac34f93c76, infoPort=37299, infoSecurePort=0, ipcPort=42249, storageInfo=lv=-57;cid=testClusterID;nsid=1581459177;c=1733231476851), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-03T13:11:18,351 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x53ecf5ffff404b87 with lease ID 0x4e2f0e9b0d6384a0: Processing first storage report for DS-356c3522-eec7-4526-b5b4-3bc2f7d9668b from datanode DatanodeRegistration(127.0.0.1:34307, datanodeUuid=783e0338-1325-4a89-9548-84ac34f93c76, infoPort=37299, infoSecurePort=0, ipcPort=42249, storageInfo=lv=-57;cid=testClusterID;nsid=1581459177;c=1733231476851)
2024-12-03T13:11:18,351 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x53ecf5ffff404b87 with lease ID 0x4e2f0e9b0d6384a0: from storage DS-356c3522-eec7-4526-b5b4-3bc2f7d9668b node DatanodeRegistration(127.0.0.1:34307, datanodeUuid=783e0338-1325-4a89-9548-84ac34f93c76, infoPort=37299, infoSecurePort=0, ipcPort=42249, storageInfo=lv=-57;cid=testClusterID;nsid=1581459177;c=1733231476851), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-03T13:11:18,498 WARN [Thread-1745 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/cluster_544ddc48-3306-e34b-de84-543349964af8/dfs/data/data4/current/BP-1105107160-172.17.0.2-1733231476851/current, will proceed with Du for space computation calculation,
2024-12-03T13:11:18,498 WARN [Thread-1744 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/cluster_544ddc48-3306-e34b-de84-543349964af8/dfs/data/data3/current/BP-1105107160-172.17.0.2-1733231476851/current, will proceed with Du for space computation calculation,
2024-12-03T13:11:18,519 WARN [Thread-1720 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-03T13:11:18,522 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x10c258a6af6a5669 with lease ID 0x4e2f0e9b0d6384a1: Processing first storage report for DS-dfd15464-51ed-45a4-b19a-c284540597be from datanode DatanodeRegistration(127.0.0.1:40969, datanodeUuid=587006a0-d2ce-462f-a505-7ffd679c4f3e, infoPort=43497, infoSecurePort=0, ipcPort=44395, storageInfo=lv=-57;cid=testClusterID;nsid=1581459177;c=1733231476851)
2024-12-03T13:11:18,522 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x10c258a6af6a5669 with lease ID 0x4e2f0e9b0d6384a1: from storage DS-dfd15464-51ed-45a4-b19a-c284540597be node DatanodeRegistration(127.0.0.1:40969, datanodeUuid=587006a0-d2ce-462f-a505-7ffd679c4f3e, infoPort=43497, infoSecurePort=0, ipcPort=44395, storageInfo=lv=-57;cid=testClusterID;nsid=1581459177;c=1733231476851), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-03T13:11:18,522 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x10c258a6af6a5669 with lease ID 0x4e2f0e9b0d6384a1: Processing first storage report for DS-ba32ee3f-db2f-4c7f-af68-b68c84974503 from datanode DatanodeRegistration(127.0.0.1:40969, datanodeUuid=587006a0-d2ce-462f-a505-7ffd679c4f3e, infoPort=43497, infoSecurePort=0, ipcPort=44395, storageInfo=lv=-57;cid=testClusterID;nsid=1581459177;c=1733231476851)
2024-12-03T13:11:18,522 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x10c258a6af6a5669 with lease ID 0x4e2f0e9b0d6384a1: from storage DS-ba32ee3f-db2f-4c7f-af68-b68c84974503 node DatanodeRegistration(127.0.0.1:40969, datanodeUuid=587006a0-d2ce-462f-a505-7ffd679c4f3e, infoPort=43497, infoSecurePort=0, ipcPort=44395, storageInfo=lv=-57;cid=testClusterID;nsid=1581459177;c=1733231476851), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-03T13:11:18,543 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265
2024-12-03T13:11:18,546 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/cluster_544ddc48-3306-e34b-de84-543349964af8/zookeeper_0, clientPort=59615, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/cluster_544ddc48-3306-e34b-de84-543349964af8/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/cluster_544ddc48-3306-e34b-de84-543349964af8/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-03T13:11:18,546 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=59615
2024-12-03T13:11:18,547 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:11:18,548 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:11:18,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741825_1001 (size=7)
2024-12-03T13:11:18,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741825_1001 (size=7)
2024-12-03T13:11:18,561 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3 with version=8
2024-12-03T13:11:18,561 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/hbase-staging
2024-12-03T13:11:18,563 INFO [Time-limited test {}] client.ConnectionUtils(129): master/35946657df09:0 server-side Connection retries=45
2024-12-03T13:11:18,563 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-03T13:11:18,563 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-03T13:11:18,563 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-03T13:11:18,563 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-03T13:11:18,563 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-03T13:11:18,563 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-03T13:11:18,563 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-03T13:11:18,564 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:45951
2024-12-03T13:11:18,564 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:11:18,566 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:11:18,569 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:45951 connecting to ZooKeeper ensemble=127.0.0.1:59615
2024-12-03T13:11:18,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:459510x0, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-03T13:11:18,611 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45951-0x1019c9b29860000 connected
2024-12-03T13:11:18,675 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-03T13:11:18,676 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-03T13:11:18,676 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-03T13:11:18,677 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45951
2024-12-03T13:11:18,677 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45951
2024-12-03T13:11:18,677 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45951
2024-12-03T13:11:18,677 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45951
2024-12-03T13:11:18,677 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45951
2024-12-03T13:11:18,678 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3, hbase.cluster.distributed=false
2024-12-03T13:11:18,697 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/35946657df09:0 server-side Connection retries=45
2024-12-03T13:11:18,697 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-03T13:11:18,697 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-03T13:11:18,697 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-03T13:11:18,697 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-03T13:11:18,697 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-03T13:11:18,698 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-03T13:11:18,698 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-03T13:11:18,698 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:40599
2024-12-03T13:11:18,699 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-03T13:11:18,699 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-03T13:11:18,700 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:11:18,702 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:11:18,704 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:40599 connecting to ZooKeeper ensemble=127.0.0.1:59615
2024-12-03T13:11:18,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:405990x0, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-03T13:11:18,712 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:405990x0, quorum=127.0.0.1:59615, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-03T13:11:18,712 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40599-0x1019c9b29860001 connected
2024-12-03T13:11:18,713 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40599-0x1019c9b29860001, quorum=127.0.0.1:59615, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-03T13:11:18,713 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40599-0x1019c9b29860001, quorum=127.0.0.1:59615, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-03T13:11:18,714 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40599
2024-12-03T13:11:18,714 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40599
2024-12-03T13:11:18,714 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40599
2024-12-03T13:11:18,714 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40599
2024-12-03T13:11:18,714 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40599
2024-12-03T13:11:18,715 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/35946657df09,45951,1733231478562
2024-12-03T13:11:18,724 DEBUG [M:0;35946657df09:45951 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;35946657df09:45951
2024-12-03T13:11:18,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40599-0x1019c9b29860001, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T13:11:18,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T13:11:18,725 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/35946657df09,45951,1733231478562
2024-12-03T13:11:18,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40599-0x1019c9b29860001, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-03T13:11:18,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-03T13:11:18,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:11:18,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40599-0x1019c9b29860001, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:11:18,733 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-03T13:11:18,734 INFO [master/35946657df09:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/35946657df09,45951,1733231478562 from backup master directory
2024-12-03T13:11:18,734 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-03T13:11:18,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/35946657df09,45951,1733231478562
2024-12-03T13:11:18,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40599-0x1019c9b29860001, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T13:11:18,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T13:11:18,741 WARN [master/35946657df09:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-03T13:11:18,741 INFO [master/35946657df09:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=35946657df09,45951,1733231478562
2024-12-03T13:11:18,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741826_1002 (size=42)
2024-12-03T13:11:18,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741826_1002 (size=42)
2024-12-03T13:11:18,751 DEBUG [master/35946657df09:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/hbase.id with ID: 9750999d-cd57-4215-9439-c447eb21ce51
2024-12-03T13:11:18,761 INFO [master/35946657df09:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:11:18,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40599-0x1019c9b29860001, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:11:18,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:11:18,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741827_1003 (size=196)
2024-12-03T13:11:18,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741827_1003 (size=196)
2024-12-03T13:11:18,776 INFO [master/35946657df09:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-03T13:11:18,776 INFO [master/35946657df09:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-03T13:11:18,777 INFO [master/35946657df09:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-03T13:11:18,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741828_1004 (size=1189)
2024-12-03T13:11:18,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741828_1004 (size=1189)
2024-12-03T13:11:18,783 INFO [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/MasterData/data/master/store
2024-12-03T13:11:18,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741829_1005 (size=34)
2024-12-03T13:11:18,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741829_1005 (size=34)
2024-12-03T13:11:18,789 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T13:11:18,789 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-03T13:11:18,789 INFO [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T13:11:18,789 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T13:11:18,789 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T13:11:18,789 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T13:11:18,789 INFO [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T13:11:18,789 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-03T13:11:18,790 WARN [master/35946657df09:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/MasterData/data/master/store/.initializing 2024-12-03T13:11:18,790 DEBUG [master/35946657df09:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/MasterData/WALs/35946657df09,45951,1733231478562 2024-12-03T13:11:18,792 INFO [master/35946657df09:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=35946657df09%2C45951%2C1733231478562, suffix=, logDir=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/MasterData/WALs/35946657df09,45951,1733231478562, archiveDir=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/MasterData/oldWALs, maxLogs=10 2024-12-03T13:11:18,792 INFO [master/35946657df09:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C45951%2C1733231478562.1733231478792 2024-12-03T13:11:18,796 INFO [master/35946657df09:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/MasterData/WALs/35946657df09,45951,1733231478562/35946657df09%2C45951%2C1733231478562.1733231478792 2024-12-03T13:11:18,796 DEBUG [master/35946657df09:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43497:43497),(127.0.0.1/127.0.0.1:37299:37299)] 2024-12-03T13:11:18,796 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-03T13:11:18,796 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T13:11:18,796 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T13:11:18,796 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T13:11:18,797 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 
2024-12-03T13:11:18,797 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:11:18,799 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-12-03T13:11:18,799 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:11:18,799 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-03T13:11:18,799 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:11:18,800 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-12-03T13:11:18,800 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:11:18,801 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-03T13:11:18,801 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:11:18,802 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-12-03T13:11:18,802 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:11:18,802 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-03T13:11:18,802 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:11:18,803 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-12-03T13:11:18,803 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:11:18,803 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-03T13:11:18,804 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:11:18,804 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
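The repeated CompactionConfiguration(181) entries above show the same ratio-based compaction parameters for every column family: file counts in [3, 10), ratio 1.2 (5.0 off-peak), a 128 MB minimum compact size, a throttle point of 2684354560 bytes, and weekly major compactions with 50% jitter. These correspond to well-known hbase-site.xml keys; a sketch of setting them programmatically (values copied from the log, which are also the defaults):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);                       // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                      // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                // ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);        // off-peak ratio
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
        conf.setLong("hbase.regionserver.thread.compaction.throttle", 2684354560L); // throttle point
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);           // major period: 7 days in ms
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);         // +/- 50% jitter
      }
    }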
2024-12-03T13:11:18,806 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-12-03T13:11:18,806 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-12-03T13:11:18,808 DEBUG [master/35946657df09:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-03T13:11:18,808 INFO [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=743247, jitterRate=-0.05491279065608978}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-12-03T13:11:18,809 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-03T13:11:18,809 INFO [master/35946657df09:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-12-03T13:11:18,811 DEBUG [master/35946657df09:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@771c7f33, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-03T13:11:18,812 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating...
2024-12-03T13:11:18,812 INFO [master/35946657df09:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-12-03T13:11:18,812 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-12-03T13:11:18,812 INFO [master/35946657df09:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-12-03T13:11:18,813 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec
2024-12-03T13:11:18,813 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec
2024-12-03T13:11:18,813 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-12-03T13:11:18,813 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:11:18,815 INFO [master/35946657df09:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
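The Close-WAL-Writer-0 WARN above comes from an earlier mini-cluster (NameNode port 42201, not this cluster's 34583): RecoverLeaseFSUtils probes DistributedFileSystem.isFileClosed via reflection while closing an old WAL, and the probe fails with "Filesystem closed" because that cluster's DFSClient was already shut down; it is noise from test teardown, not an error in this master's startup. Stripped of the reflection, the recover-and-wait pattern looks roughly like this (a sketch, assuming a still-open FileSystem handle):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      // Ask the NameNode to recover the lease on a WAL and wait until the file is closed.
      static void recover(Configuration conf, Path wal) throws Exception {
        FileSystem fs = wal.getFileSystem(conf);
        if (!(fs instanceof DistributedFileSystem)) {
          return; // lease recovery only applies to HDFS
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        boolean recovered = dfs.recoverLease(wal);
        while (!recovered && !dfs.isFileClosed(wal)) {
          Thread.sleep(1000); // poll until the NameNode reports the file closed
          recovered = dfs.recoverLease(wal);
        }
      }
    }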
2024-12-03T13:11:18,816 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-12-03T13:11:18,825 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false
2024-12-03T13:11:18,825 INFO [master/35946657df09:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-12-03T13:11:18,825 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-12-03T13:11:18,833 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false
2024-12-03T13:11:18,833 INFO [master/35946657df09:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-12-03T13:11:18,834 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-12-03T13:11:18,841 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false
2024-12-03T13:11:18,842 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-12-03T13:11:18,846 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:11:18,850 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false
2024-12-03T13:11:18,851 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-12-03T13:11:18,858 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-12-03T13:11:18,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40599-0x1019c9b29860001, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-03T13:11:18,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-03T13:11:18,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:11:18,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40599-0x1019c9b29860001, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:11:18,871 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=35946657df09,45951,1733231478562, sessionid=0x1019c9b29860000, setting cluster-up flag (Was=false)
2024-12-03T13:11:18,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40599-0x1019c9b29860001, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:11:18,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:11:18,916 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-12-03T13:11:18,917 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=35946657df09,45951,1733231478562
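The ZKUtil(444) entries above are benign: the new active master probes /hbase/balancer, /hbase/normalizer, /hbase/switch/* and /hbase/snapshot-cleanup, and "node does not exist" simply means no switch was ever persisted. The probe is an ordinary ZooKeeper existence check; roughly (quorum string taken from the log, session handling and retries elided):

    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class ZnodeProbeSketch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:59615", 30000, event -> { });
        Stat stat = zk.exists("/hbase/balancer", false);
        if (stat == null) {
          System.out.println("/hbase/balancer does not exist (not necessarily an error)");
        } else {
          byte[] data = zk.getData("/hbase/balancer", false, stat);
          System.out.println("balancer switch payload: " + data.length + " bytes");
        }
        zk.close();
      }
    }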
2024-12-03T13:11:18,933 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40599-0x1019c9b29860001, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:11:18,933 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:11:18,958 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-12-03T13:11:18,959 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=35946657df09,45951,1733231478562
2024-12-03T13:11:18,961 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta
2024-12-03T13:11:18,961 INFO [master/35946657df09:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2
2024-12-03T13:11:18,961 INFO [master/35946657df09:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-12-03T13:11:18,961 DEBUG [master/35946657df09:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 35946657df09,45951,1733231478562 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
2024-12-03T13:11:18,962 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/35946657df09:0, corePoolSize=5, maxPoolSize=5
2024-12-03T13:11:18,962 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/35946657df09:0, corePoolSize=5, maxPoolSize=5
2024-12-03T13:11:18,962 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/35946657df09:0, corePoolSize=5, maxPoolSize=5
2024-12-03T13:11:18,962 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/35946657df09:0, corePoolSize=5, maxPoolSize=5
2024-12-03T13:11:18,962 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/35946657df09:0, corePoolSize=10, maxPoolSize=10
2024-12-03T13:11:18,962 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/35946657df09:0, corePoolSize=1, maxPoolSize=1
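The StochasticLoadBalancer(294) entry above lists the search parameters the balancer was loaded with (the logged values are the 2.x defaults). They map to the stochastic balancer keys; a sketch (key names are the standard ones, values from the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1000000);
        conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30000L); // ms per balance run
      }
    }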
2024-12-03T13:11:18,962 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/35946657df09:0, corePoolSize=2, maxPoolSize=2
2024-12-03T13:11:18,962 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:11:18,963 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta
2024-12-03T13:11:18,963 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region
2024-12-03T13:11:18,964 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:11:18,964 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-03T13:11:18,967 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733231508966
2024-12-03T13:11:18,967 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-12-03T13:11:18,967 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-12-03T13:11:18,967 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-12-03T13:11:18,967 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-12-03T13:11:18,967 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-12-03T13:11:18,967 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-12-03T13:11:18,967 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-03T13:11:18,967 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-12-03T13:11:18,967 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-12-03T13:11:18,968 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-12-03T13:11:18,968 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-12-03T13:11:18,968 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-12-03T13:11:18,968 DEBUG [master/35946657df09:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/35946657df09:0:becomeActiveMaster-HFileCleaner.large.0-1733231478968,5,FailOnTimeoutGroup]
2024-12-03T13:11:18,968 DEBUG [master/35946657df09:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/35946657df09:0:becomeActiveMaster-HFileCleaner.small.0-1733231478968,5,FailOnTimeoutGroup]
2024-12-03T13:11:18,968 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-03T13:11:18,968 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-12-03T13:11:18,968 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-12-03T13:11:18,968 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
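The FSTableDescriptors(133) entry above writes the hbase:meta descriptor, which carries coprocessor$1 => MultiRowMutationEndpoint at priority 536870911. User tables can attach a coprocessor through the same descriptor mechanism; a sketch using the public builder API (the table name "demo" is hypothetical):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorDescriptorSketch {
      public static void main(String[] args) throws Exception {
        TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
          // registers the endpoint on the table, like coprocessor$1 on hbase:meta above
          .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
          .build();
        System.out.println(td);
      }
    }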
2024-12-03T13:11:18,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741831_1007 (size=1039)
2024-12-03T13:11:18,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741831_1007 (size=1039)
2024-12-03T13:11:18,976 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039
2024-12-03T13:11:18,976 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3
2024-12-03T13:11:18,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741832_1008 (size=32)
2024-12-03T13:11:18,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741832_1008 (size=32)
2024-12-03T13:11:18,983 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T13:11:18,984 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-03T13:11:18,985 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-03T13:11:18,985 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:11:18,986 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-03T13:11:18,986 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-03T13:11:18,987 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-03T13:11:18,987 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:11:18,987 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-03T13:11:18,987 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-03T13:11:18,988 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-03T13:11:18,988 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:11:18,988 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-03T13:11:18,989 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/hbase/meta/1588230740
2024-12-03T13:11:18,989 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/hbase/meta/1588230740
2024-12-03T13:11:18,991 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-03T13:11:18,992 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-03T13:11:18,994 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-03T13:11:18,994 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=815255, jitterRate=0.03665079176425934}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-03T13:11:18,994 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-12-03T13:11:18,994 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-03T13:11:18,994 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-03T13:11:18,994 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-03T13:11:18,994 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-03T13:11:18,994 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-03T13:11:18,995 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-03T13:11:18,995 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-03T13:11:18,996 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta
2024-12-03T13:11:18,996 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta
2024-12-03T13:11:18,996 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-12-03T13:11:18,997 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
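The FlushLargeStoresPolicy(65) entries in this section show how the per-family flush lower bound is derived when hbase.hregion.percolumnfamilyflush.size.lower.bound is unset: the region's memstore flush size divided by the number of column families. For master:store that is 128 MB across 4 families, i.e. the flushSizeLowerBound=33554432 logged earlier; a worked sketch of just that arithmetic:

    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        long memstoreFlushSize = 134217728L; // flushSize logged for master:store (128 MB)
        int families = 4;                    // info, proc, rs, state
        long lowerBound = memstoreFlushSize / families;
        // 134217728 / 4 = 33554432 (32 MB), matching flushSizeLowerBound in the log
        System.out.println(lowerBound);
      }
    }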
2024-12-03T13:11:18,997 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-12-03T13:11:19,024 DEBUG [RS:0;35946657df09:40599 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;35946657df09:40599
2024-12-03T13:11:19,025 INFO [RS:0;35946657df09:40599 {}] regionserver.HRegionServer(1008): ClusterId : 9750999d-cd57-4215-9439-c447eb21ce51
2024-12-03T13:11:19,026 DEBUG [RS:0;35946657df09:40599 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-12-03T13:11:19,034 DEBUG [RS:0;35946657df09:40599 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-12-03T13:11:19,034 DEBUG [RS:0;35946657df09:40599 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-12-03T13:11:19,042 DEBUG [RS:0;35946657df09:40599 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-12-03T13:11:19,042 DEBUG [RS:0;35946657df09:40599 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b414533, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-03T13:11:19,043 DEBUG [RS:0;35946657df09:40599 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18e6b9ef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=35946657df09/172.17.0.2:0
2024-12-03T13:11:19,043 INFO [RS:0;35946657df09:40599 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled
2024-12-03T13:11:19,043 INFO [RS:0;35946657df09:40599 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled
2024-12-03T13:11:19,043 DEBUG [RS:0;35946657df09:40599 {}] regionserver.HRegionServer(1090): About to register with Master.
2024-12-03T13:11:19,043 INFO [RS:0;35946657df09:40599 {}] regionserver.HRegionServer(3073): reportForDuty to master=35946657df09,45951,1733231478562 with isa=35946657df09/172.17.0.2:40599, startcode=1733231478697
2024-12-03T13:11:19,044 DEBUG [RS:0;35946657df09:40599 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-03T13:11:19,046 INFO [RS-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53911, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService
2024-12-03T13:11:19,046 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45951 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 35946657df09,40599,1733231478697
2024-12-03T13:11:19,046 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45951 {}] master.ServerManager(486): Registering regionserver=35946657df09,40599,1733231478697
2024-12-03T13:11:19,047 DEBUG [RS:0;35946657df09:40599 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3
2024-12-03T13:11:19,047 DEBUG [RS:0;35946657df09:40599 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:34583
2024-12-03T13:11:19,047 DEBUG [RS:0;35946657df09:40599 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1
2024-12-03T13:11:19,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-03T13:11:19,058 DEBUG [RS:0;35946657df09:40599 {}] zookeeper.ZKUtil(111): regionserver:40599-0x1019c9b29860001, quorum=127.0.0.1:59615, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/35946657df09,40599,1733231478697
2024-12-03T13:11:19,058 WARN [RS:0;35946657df09:40599 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
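The AbstractRpcClient(198) entries in the region server startup above report connectTO=10000, readTO=20000, writeTO=60000 and minIdleTimeBeforeClose=120000. To the best of my knowledge these map to the client socket-timeout keys below; treat the exact key names as an assumption and verify against your HBase version:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcClientTimeoutSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.ipc.client.socket.timeout.connect", 10000); // connectTO, ms
        conf.setInt("hbase.ipc.client.socket.timeout.read", 20000);    // readTO, ms
        conf.setInt("hbase.ipc.client.socket.timeout.write", 60000);   // writeTO, ms
        conf.setInt("hbase.ipc.client.connection.minIdleTimeBeforeClose", 120000);
      }
    }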
2024-12-03T13:11:19,059 INFO [RS:0;35946657df09:40599 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-03T13:11:19,059 DEBUG [RS:0;35946657df09:40599 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/WALs/35946657df09,40599,1733231478697
2024-12-03T13:11:19,059 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [35946657df09,40599,1733231478697]
2024-12-03T13:11:19,062 DEBUG [RS:0;35946657df09:40599 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds
2024-12-03T13:11:19,062 INFO [RS:0;35946657df09:40599 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-12-03T13:11:19,063 INFO [RS:0;35946657df09:40599 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-12-03T13:11:19,063 INFO [RS:0;35946657df09:40599 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-03T13:11:19,064 INFO [RS:0;35946657df09:40599 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-03T13:11:19,064 INFO [RS:0;35946657df09:40599 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S
2024-12-03T13:11:19,064 INFO [RS:0;35946657df09:40599 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
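The MemStoreFlusher(130) entry above is consistent with the default sizing rules: the global memstore limit is a fraction of the heap (default 0.4 via hbase.regionserver.global.memstore.size) and the low-water mark is a fraction of that limit (default 0.95 via hbase.regionserver.global.memstore.size.lower.limit), since 836 M / 880 M = 0.95. The ~2.2 GB heap implied by 880 M / 0.4 is an inference, not something the log states. A worked sketch of the arithmetic:

    public class GlobalMemStoreSketch {
      public static void main(String[] args) {
        long heapBytes = 2200L * 1024 * 1024; // assumed test heap (~2.2 GB), inferred from the log
        double limitFraction = 0.4;           // hbase.regionserver.global.memstore.size (default)
        double lowerFraction = 0.95;          // ...global.memstore.size.lower.limit (default)
        long limit = (long) (heapBytes * limitFraction);   // ~880 M, as logged
        long lowMark = (long) (limit * lowerFraction);     // ~836 M, as logged
        System.out.println(limit + " / " + lowMark);
      }
    }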
2024-12-03T13:11:19,064 DEBUG [RS:0;35946657df09:40599 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:11:19,065 DEBUG [RS:0;35946657df09:40599 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:11:19,065 DEBUG [RS:0;35946657df09:40599 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:11:19,065 DEBUG [RS:0;35946657df09:40599 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:11:19,065 DEBUG [RS:0;35946657df09:40599 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:11:19,065 DEBUG [RS:0;35946657df09:40599 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/35946657df09:0, corePoolSize=2, maxPoolSize=2
2024-12-03T13:11:19,065 DEBUG [RS:0;35946657df09:40599 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:11:19,065 DEBUG [RS:0;35946657df09:40599 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:11:19,065 DEBUG [RS:0;35946657df09:40599 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:11:19,065 DEBUG [RS:0;35946657df09:40599 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:11:19,065 DEBUG [RS:0;35946657df09:40599 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:11:19,065 DEBUG [RS:0;35946657df09:40599 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/35946657df09:0, corePoolSize=3, maxPoolSize=3
2024-12-03T13:11:19,065 DEBUG [RS:0;35946657df09:40599 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/35946657df09:0, corePoolSize=3, maxPoolSize=3
2024-12-03T13:11:19,065 INFO [RS:0;35946657df09:40599 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-12-03T13:11:19,065 INFO [RS:0;35946657df09:40599 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-12-03T13:11:19,065 INFO [RS:0;35946657df09:40599 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-12-03T13:11:19,065 INFO [RS:0;35946657df09:40599 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-12-03T13:11:19,065 INFO [RS:0;35946657df09:40599 {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,40599,1733231478697-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-03T13:11:19,085 INFO [RS:0;35946657df09:40599 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-12-03T13:11:19,085 INFO [RS:0;35946657df09:40599 {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,40599,1733231478697-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-03T13:11:19,104 INFO [RS:0;35946657df09:40599 {}] regionserver.Replication(204): 35946657df09,40599,1733231478697 started
2024-12-03T13:11:19,104 INFO [RS:0;35946657df09:40599 {}] regionserver.HRegionServer(1767): Serving as 35946657df09,40599,1733231478697, RpcServer on 35946657df09/172.17.0.2:40599, sessionid=0x1019c9b29860001
2024-12-03T13:11:19,104 DEBUG [RS:0;35946657df09:40599 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-12-03T13:11:19,104 DEBUG [RS:0;35946657df09:40599 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 35946657df09,40599,1733231478697
2024-12-03T13:11:19,104 DEBUG [RS:0;35946657df09:40599 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '35946657df09,40599,1733231478697'
2024-12-03T13:11:19,104 DEBUG [RS:0;35946657df09:40599 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-12-03T13:11:19,105 DEBUG [RS:0;35946657df09:40599 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-12-03T13:11:19,105 DEBUG [RS:0;35946657df09:40599 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-12-03T13:11:19,105 DEBUG [RS:0;35946657df09:40599 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-12-03T13:11:19,105 DEBUG [RS:0;35946657df09:40599 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 35946657df09,40599,1733231478697
2024-12-03T13:11:19,105 DEBUG [RS:0;35946657df09:40599 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '35946657df09,40599,1733231478697'
2024-12-03T13:11:19,105 DEBUG [RS:0;35946657df09:40599 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-12-03T13:11:19,106 DEBUG [RS:0;35946657df09:40599 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-03T13:11:19,106 DEBUG [RS:0;35946657df09:40599 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-03T13:11:19,106 INFO [RS:0;35946657df09:40599 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-03T13:11:19,106 INFO [RS:0;35946657df09:40599 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-12-03T13:11:19,148 WARN [35946657df09:45951 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions.
2024-12-03T13:11:19,208 INFO [RS:0;35946657df09:40599 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=35946657df09%2C40599%2C1733231478697, suffix=, logDir=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/WALs/35946657df09,40599,1733231478697, archiveDir=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/oldWALs, maxLogs=32
2024-12-03T13:11:19,209 INFO [RS:0;35946657df09:40599 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C40599%2C1733231478697.1733231479209
2024-12-03T13:11:19,215 INFO [RS:0;35946657df09:40599 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/WALs/35946657df09,40599,1733231478697/35946657df09%2C40599%2C1733231478697.1733231479209
2024-12-03T13:11:19,215 DEBUG [RS:0;35946657df09:40599 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43497:43497),(127.0.0.1/127.0.0.1:37299:37299)]
2024-12-03T13:11:19,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:11:19,398 DEBUG [35946657df09:45951 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-12-03T13:11:19,398 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=35946657df09,40599,1733231478697
2024-12-03T13:11:19,399 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 35946657df09,40599,1733231478697, state=OPENING
2024-12-03T13:11:19,450 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-12-03T13:11:19,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40599-0x1019c9b29860001, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:11:19,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:11:19,459 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-03T13:11:19,459 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-03T13:11:19,459 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=35946657df09,40599,1733231478697}]
2024-12-03T13:11:19,612 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 35946657df09,40599,1733231478697
2024-12-03T13:11:19,612 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-03T13:11:19,614 INFO [RS-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56348, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-03T13:11:19,623 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740
2024-12-03T13:11:19,623 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-03T13:11:19,626 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=35946657df09%2C40599%2C1733231478697.meta, suffix=.meta, logDir=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/WALs/35946657df09,40599,1733231478697, archiveDir=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/oldWALs, maxLogs=32
2024-12-03T13:11:19,627 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C40599%2C1733231478697.meta.1733231479626.meta
2024-12-03T13:11:19,644 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/WALs/35946657df09,40599,1733231478697/35946657df09%2C40599%2C1733231478697.meta.1733231479626.meta
2024-12-03T13:11:19,644 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37299:37299),(127.0.0.1/127.0.0.1:43497:43497)]
2024-12-03T13:11:19,644 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-12-03T13:11:19,644 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-12-03T13:11:19,644 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-12-03T13:11:19,644 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
2024-12-03T13:11:19,645 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-12-03T13:11:19,645 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T13:11:19,645 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740
2024-12-03T13:11:19,645 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740
2024-12-03T13:11:19,647 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-03T13:11:19,648 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-03T13:11:19,648 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
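The "Failed invocation" WARN above is easier to read once you see the call shape behind it: per the stack trace, RecoverLeaseFSUtils reaches DistributedFileSystem#isFileClosed through java.lang.reflect.Method.invoke (a compatibility pattern for Hadoop versions where the method may be absent), so the real failure (IOException: Filesystem closed, because the mini-cluster's old DFSClient is already shut down) surfaces wrapped in an InvocationTargetException. A minimal sketch of that pattern; the class and fallback behavior here are illustrative, not HBase's exact code:

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReflectiveIsFileClosed {
  /**
   * Calls isFileClosed(Path) via reflection, the way a compatibility shim
   * would when the method may not exist at compile time. Any IOException
   * thrown by the target method arrives wrapped in InvocationTargetException,
   * matching the "InvocationTargetException: null ... Caused by:
   * java.io.IOException: Filesystem closed" traces in this log.
   */
  static boolean isFileClosed(FileSystem fs, Path p) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, p);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return false; // method unavailable; caller falls back to retrying lease recovery
    } catch (InvocationTargetException e) {
      // e.getCause() is the real failure, e.g. IOException("Filesystem closed").
      return false;
    }
  }
}
```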
2024-12-03T13:11:19,648 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-03T13:11:19,648 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-03T13:11:19,649 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-03T13:11:19,649 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:11:19,650 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-03T13:11:19,650 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-03T13:11:19,650 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-03T13:11:19,650 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:11:19,651 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-03T13:11:19,652 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/hbase/meta/1588230740
2024-12-03T13:11:19,653 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/hbase/meta/1588230740
2024-12-03T13:11:19,654 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-03T13:11:19,655 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-03T13:11:19,656 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=870958, jitterRate=0.10748131573200226}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-03T13:11:19,656 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-12-03T13:11:19,657 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733231479611
2024-12-03T13:11:19,659 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740
2024-12-03T13:11:19,659 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740
2024-12-03T13:11:19,659 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=35946657df09,40599,1733231478697
2024-12-03T13:11:19,660 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 35946657df09,40599,1733231478697, state=OPEN
2024-12-03T13:11:19,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-03T13:11:19,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40599-0x1019c9b29860001, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-03T13:11:19,703 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-03T13:11:19,703 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-03T13:11:19,706 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2
2024-12-03T13:11:19,706 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=35946657df09,40599,1733231478697 in 244 msec
2024-12-03T13:11:19,712 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1
2024-12-03T13:11:19,712 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 711 msec
2024-12-03T13:11:19,715 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 753 msec
2024-12-03T13:11:19,715 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733231479715, completionTime=-1
2024-12-03T13:11:19,715 INFO [master/35946657df09:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running
2024-12-03T13:11:19,715 DEBUG [master/35946657df09:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster...
2024-12-03T13:11:19,716 DEBUG [hconnection-0x5236463d-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-03T13:11:19,717 INFO [RS-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56350, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-03T13:11:19,718 INFO [master/35946657df09:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1
2024-12-03T13:11:19,718 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733231539718
2024-12-03T13:11:19,718 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733231599718
2024-12-03T13:11:19,718 INFO [master/35946657df09:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 3 msec
2024-12-03T13:11:19,742 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,45951,1733231478562-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-03T13:11:19,742 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,45951,1733231478562-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-03T13:11:19,742 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,45951,1733231478562-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-03T13:11:19,742 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-35946657df09:45951, period=300000, unit=MILLISECONDS is enabled.
2024-12-03T13:11:19,742 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-12-03T13:11:19,742 INFO [master/35946657df09:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating...
2024-12-03T13:11:19,743 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-03T13:11:19,744 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace
2024-12-03T13:11:19,744 DEBUG [master/35946657df09:0.Chore.1 {}] janitor.CatalogJanitor(179):
2024-12-03T13:11:19,745 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION
2024-12-03T13:11:19,745 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:11:19,746 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-03T13:11:19,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741835_1011 (size=358)
2024-12-03T13:11:19,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741835_1011 (size=358)
2024-12-03T13:11:19,759 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => c3c4997abc2047f0af08dcbeaa6cd4b5, NAME => 'hbase:namespace,,1733231479742.c3c4997abc2047f0af08dcbeaa6cd4b5.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3
2024-12-03T13:11:19,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741836_1012 (size=42)
2024-12-03T13:11:19,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741836_1012 (size=42)
2024-12-03T13:11:19,767 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733231479742.c3c4997abc2047f0af08dcbeaa6cd4b5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T13:11:19,767 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing c3c4997abc2047f0af08dcbeaa6cd4b5, disabling compactions & flushes
2024-12-03T13:11:19,767 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733231479742.c3c4997abc2047f0af08dcbeaa6cd4b5.
2024-12-03T13:11:19,767 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733231479742.c3c4997abc2047f0af08dcbeaa6cd4b5.
2024-12-03T13:11:19,767 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733231479742.c3c4997abc2047f0af08dcbeaa6cd4b5. after waiting 0 ms
2024-12-03T13:11:19,767 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733231479742.c3c4997abc2047f0af08dcbeaa6cd4b5.
2024-12-03T13:11:19,767 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733231479742.c3c4997abc2047f0af08dcbeaa6cd4b5.
2024-12-03T13:11:19,767 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for c3c4997abc2047f0af08dcbeaa6cd4b5:
2024-12-03T13:11:19,768 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META
2024-12-03T13:11:19,769 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733231479742.c3c4997abc2047f0af08dcbeaa6cd4b5.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733231479768"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733231479768"}]},"ts":"1733231479768"}
2024-12-03T13:11:19,770 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta.
2024-12-03T13:11:19,771 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-03T13:11:19,771 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733231479771"}]},"ts":"1733231479771"}
2024-12-03T13:11:19,773 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta
2024-12-03T13:11:19,791 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=c3c4997abc2047f0af08dcbeaa6cd4b5, ASSIGN}]
2024-12-03T13:11:19,793 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=c3c4997abc2047f0af08dcbeaa6cd4b5, ASSIGN
2024-12-03T13:11:19,795 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=c3c4997abc2047f0af08dcbeaa6cd4b5, ASSIGN; state=OFFLINE, location=35946657df09,40599,1733231478697; forceNewPlan=false, retain=false
2024-12-03T13:11:19,814 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:11:19,846 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:11:19,945 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=c3c4997abc2047f0af08dcbeaa6cd4b5, regionState=OPENING, regionLocation=35946657df09,40599,1733231478697
2024-12-03T13:11:19,947 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure c3c4997abc2047f0af08dcbeaa6cd4b5, server=35946657df09,40599,1733231478697}]
2024-12-03T13:11:20,099 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 35946657df09,40599,1733231478697
2024-12-03T13:11:20,102 INFO [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733231479742.c3c4997abc2047f0af08dcbeaa6cd4b5.
2024-12-03T13:11:20,102 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => c3c4997abc2047f0af08dcbeaa6cd4b5, NAME => 'hbase:namespace,,1733231479742.c3c4997abc2047f0af08dcbeaa6cd4b5.', STARTKEY => '', ENDKEY => ''}
2024-12-03T13:11:20,103 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace c3c4997abc2047f0af08dcbeaa6cd4b5
2024-12-03T13:11:20,103 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733231479742.c3c4997abc2047f0af08dcbeaa6cd4b5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T13:11:20,103 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for c3c4997abc2047f0af08dcbeaa6cd4b5
2024-12-03T13:11:20,103 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for c3c4997abc2047f0af08dcbeaa6cd4b5
2024-12-03T13:11:20,105 INFO [StoreOpener-c3c4997abc2047f0af08dcbeaa6cd4b5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region c3c4997abc2047f0af08dcbeaa6cd4b5
2024-12-03T13:11:20,106 INFO [StoreOpener-c3c4997abc2047f0af08dcbeaa6cd4b5-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c3c4997abc2047f0af08dcbeaa6cd4b5 columnFamilyName info
2024-12-03T13:11:20,106 DEBUG [StoreOpener-c3c4997abc2047f0af08dcbeaa6cd4b5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:11:20,106 INFO [StoreOpener-c3c4997abc2047f0af08dcbeaa6cd4b5-1 {}] regionserver.HStore(327): Store=c3c4997abc2047f0af08dcbeaa6cd4b5/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-03T13:11:20,107 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/hbase/namespace/c3c4997abc2047f0af08dcbeaa6cd4b5
2024-12-03T13:11:20,107 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/hbase/namespace/c3c4997abc2047f0af08dcbeaa6cd4b5
2024-12-03T13:11:20,109 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for c3c4997abc2047f0af08dcbeaa6cd4b5
2024-12-03T13:11:20,112 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/hbase/namespace/c3c4997abc2047f0af08dcbeaa6cd4b5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-03T13:11:20,112 INFO [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened c3c4997abc2047f0af08dcbeaa6cd4b5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=883875, jitterRate=0.12390528619289398}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-03T13:11:20,112 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for c3c4997abc2047f0af08dcbeaa6cd4b5:
2024-12-03T13:11:20,113 INFO [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733231479742.c3c4997abc2047f0af08dcbeaa6cd4b5., pid=6, masterSystemTime=1733231480099
2024-12-03T13:11:20,115 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733231479742.c3c4997abc2047f0af08dcbeaa6cd4b5.
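Each "Opened <region>" line above reports a desiredMaxFileSize next to a jitterRate. The numbers are consistent with the split size being the configured maximum plus a truncated jitter term, desired = configured + (long)(configured * jitterRate), where the configured value is the 786432-byte MAX_FILESIZE this test sets (see the TableDescriptorChecker warning further down). A quick check that reproduces all three values logged in this section, under that assumption:

```java
public class SplitSizeJitterCheck {
  public static void main(String[] args) {
    long configured = 786432L; // MAX_FILESIZE / "hbase.hregion.max.filesize" per the warning below

    // desired = configured + (long)(configured * jitterRate); the (long) cast truncates toward zero.
    System.out.println(configured + (long) (configured * 0.10748131573200226));    // 870958 (hbase:meta)
    System.out.println(configured + (long) (configured * 0.12390528619289398));    // 883875 (hbase:namespace)
    System.out.println(configured + (long) (configured * -0.0015664845705032349)); // 785201 (TestLogRolling, later)
  }
}
```

The truncation toward zero matters for the negative jitter case: -1231.93 truncates to -1231, which is how 785201 rather than 785200 appears later in the log.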
2024-12-03T13:11:20,115 INFO [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733231479742.c3c4997abc2047f0af08dcbeaa6cd4b5.
2024-12-03T13:11:20,115 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=c3c4997abc2047f0af08dcbeaa6cd4b5, regionState=OPEN, openSeqNum=2, regionLocation=35946657df09,40599,1733231478697
2024-12-03T13:11:20,118 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5
2024-12-03T13:11:20,119 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure c3c4997abc2047f0af08dcbeaa6cd4b5, server=35946657df09,40599,1733231478697 in 170 msec
2024-12-03T13:11:20,120 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4
2024-12-03T13:11:20,120 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=c3c4997abc2047f0af08dcbeaa6cd4b5, ASSIGN in 328 msec
2024-12-03T13:11:20,121 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-03T13:11:20,121 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733231480121"}]},"ts":"1733231480121"}
2024-12-03T13:11:20,123 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta
2024-12-03T13:11:20,134 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION
2024-12-03T13:11:20,136 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 391 msec
2024-12-03T13:11:20,145 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace
2024-12-03T13:11:20,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace
2024-12-03T13:11:20,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40599-0x1019c9b29860001, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:11:20,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:11:20,158 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default
2024-12-03T13:11:20,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace
2024-12-03T13:11:20,180 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 22 msec
2024-12-03T13:11:20,190 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase
2024-12-03T13:11:20,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace
2024-12-03T13:11:20,214 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 24 msec
2024-12-03T13:11:20,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default
2024-12-03T13:11:20,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:11:20,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase
2024-12-03T13:11:20,258 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 1.516sec
2024-12-03T13:11:20,258 INFO [master/35946657df09:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-12-03T13:11:20,258 INFO [master/35946657df09:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-12-03T13:11:20,258 INFO [master/35946657df09:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-12-03T13:11:20,258 INFO [master/35946657df09:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-12-03T13:11:20,258 INFO [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-12-03T13:11:20,258 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,45951,1733231478562-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-03T13:11:20,258 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,45951,1733231478562-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-12-03T13:11:20,260 DEBUG [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds
2024-12-03T13:11:20,260 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-12-03T13:11:20,260 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,45951,1733231478562-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
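The ZKWatcher lines show both the master and the region server being notified as znodes under /hbase change, and the earlier ZKUtil line ("Set watcher on znode that does not yet exist") shows a watch registered ahead of creation. A minimal standalone watcher against the same quorum illustrates that event flow; the connect string and 90000 ms session timeout mirror the ReadOnlyZKClient line just below, and everything else here is illustrative rather than HBase's own code:

```java
import java.util.concurrent.CountDownLatch;

import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class MetaLocationWatch {
  public static void main(String[] args) throws Exception {
    CountDownLatch changed = new CountDownLatch(1);
    // Same quorum and session timeout the test cluster logs.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:59615", 90000, event -> {
      // Mirrors the "Received ZooKeeper Event, type=..., state=..., path=..." lines above.
      System.out.println("Received ZooKeeper Event, type=" + event.getType()
          + ", state=" + event.getState() + ", path=" + event.getPath());
      if (event.getType() == Watcher.Event.EventType.NodeDataChanged) {
        changed.countDown();
      }
    });
    // exists() registers the watch even if the znode is not there yet, which is
    // why a watcher can be set "on znode that does not yet exist".
    zk.exists("/hbase/meta-region-server", true);
    changed.await();
    zk.close();
  }
}
```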
2024-12-03T13:11:20,317 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x63f87d2f to 127.0.0.1:59615 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4fb9338a
2024-12-03T13:11:20,329 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b342a3d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-03T13:11:20,331 DEBUG [hconnection-0x6c859bc8-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-03T13:11:20,332 INFO [RS-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56366, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-03T13:11:20,333 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=35946657df09,45951,1733231478562
2024-12-03T13:11:20,333 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:11:20,335 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false
2024-12-03T13:11:20,336 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false
2024-12-03T13:11:20,338 INFO [RS-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56072, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-12-03T13:11:20,338 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45951 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions.
2024-12-03T13:11:20,339 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45951 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing.
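The two TableDescriptorChecker warnings fire because the test deliberately shrinks the region thresholds far below the defaults so that splits and flushes happen quickly. A sketch of a descriptor that would trip the same checks, built with the HBase 2.x client API and the exact values from the warnings (786432-byte max file size, 8192-byte flush size); the class name and the act of printing are illustrative only:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SmallRegionDescriptor {
  public static void main(String[] args) {
    // Values taken from the warnings above: MAX_FILESIZE=786432, MEMSTORE_FLUSHSIZE=8192.
    // Both are far below the usual defaults, so TableDescriptorChecker warns about
    // over-splitting and very frequent flushing when the table is created.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
        .setMaxFileSize(786432L)      // MAX_FILESIZE
        .setMemStoreFlushSize(8192L)  // MEMSTORE_FLUSHSIZE
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .build();
    System.out.println(td);
  }
}
```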
2024-12-03T13:11:20,339 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45951 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-03T13:11:20,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45951 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testLogRolling
2024-12-03T13:11:20,341 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION
2024-12-03T13:11:20,341 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:11:20,341 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45951 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 9
2024-12-03T13:11:20,341 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-03T13:11:20,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45951 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9
2024-12-03T13:11:20,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741837_1013 (size=381)
2024-12-03T13:11:20,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741837_1013 (size=381)
2024-12-03T13:11:20,348 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 964427a12e751e568278ebaf291689c4, NAME => 'TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3
2024-12-03T13:11:20,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741838_1014 (size=64)
2024-12-03T13:11:20,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741838_1014 (size=64)
2024-12-03T13:11:20,355 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T13:11:20,355 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1681): Closing 964427a12e751e568278ebaf291689c4, disabling compactions & flushes
2024-12-03T13:11:20,355 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4.
2024-12-03T13:11:20,355 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4.
2024-12-03T13:11:20,355 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4. after waiting 0 ms
2024-12-03T13:11:20,355 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4.
2024-12-03T13:11:20,355 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4.
2024-12-03T13:11:20,355 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1635): Region close journal for 964427a12e751e568278ebaf291689c4:
2024-12-03T13:11:20,356 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META
2024-12-03T13:11:20,356 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733231480356"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733231480356"}]},"ts":"1733231480356"}
2024-12-03T13:11:20,358 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta.
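The Put that CREATE_TABLE_ADD_TO_META logs as JSON corresponds to an ordinary two-cell write against hbase:meta: an info:regioninfo cell and an info:state cell, keyed by the full region name. Rendered as client-side code purely as a sketch; the real write goes through MetaTableAccessor, and the cell values are shown as placeholders sized to the vlen fields in the log rather than real serialized bytes:

```java
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaRowSketch {
  public static void main(String[] args) {
    // Row key is the full region name from the logged Put.
    byte[] row = Bytes.toBytes(
        "TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4.");
    byte[] regionInfoBytes = new byte[63]; // placeholder; the log reports vlen=63
    byte[] stateBytes = new byte[6];       // placeholder; the log reports vlen=6

    Put put = new Put(row, 1733231480356L) // ts taken from the logged Put
        .addColumn(Bytes.toBytes("info"), Bytes.toBytes("regioninfo"), regionInfoBytes)
        .addColumn(Bytes.toBytes("info"), Bytes.toBytes("state"), stateBytes);
    System.out.println(put);
  }
}
```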
2024-12-03T13:11:20,359 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-03T13:11:20,359 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733231480359"}]},"ts":"1733231480359"}
2024-12-03T13:11:20,360 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta
2024-12-03T13:11:20,378 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=964427a12e751e568278ebaf291689c4, ASSIGN}]
2024-12-03T13:11:20,380 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=964427a12e751e568278ebaf291689c4, ASSIGN
2024-12-03T13:11:20,380 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=964427a12e751e568278ebaf291689c4, ASSIGN; state=OFFLINE, location=35946657df09,40599,1733231478697; forceNewPlan=false, retain=false
2024-12-03T13:11:20,531 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=964427a12e751e568278ebaf291689c4, regionState=OPENING, regionLocation=35946657df09,40599,1733231478697
2024-12-03T13:11:20,532 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 964427a12e751e568278ebaf291689c4, server=35946657df09,40599,1733231478697}]
2024-12-03T13:11:20,685 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 35946657df09,40599,1733231478697
2024-12-03T13:11:20,688 INFO [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4.
2024-12-03T13:11:20,688 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 964427a12e751e568278ebaf291689c4, NAME => 'TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4.', STARTKEY => '', ENDKEY => ''}
2024-12-03T13:11:20,689 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 964427a12e751e568278ebaf291689c4
2024-12-03T13:11:20,689 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T13:11:20,689 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 964427a12e751e568278ebaf291689c4
2024-12-03T13:11:20,689 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 964427a12e751e568278ebaf291689c4
2024-12-03T13:11:20,690 INFO [StoreOpener-964427a12e751e568278ebaf291689c4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 964427a12e751e568278ebaf291689c4
2024-12-03T13:11:20,691 INFO [StoreOpener-964427a12e751e568278ebaf291689c4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 964427a12e751e568278ebaf291689c4 columnFamilyName info
2024-12-03T13:11:20,691 DEBUG [StoreOpener-964427a12e751e568278ebaf291689c4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:11:20,692 INFO [StoreOpener-964427a12e751e568278ebaf291689c4-1 {}] regionserver.HStore(327): Store=964427a12e751e568278ebaf291689c4/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-03T13:11:20,693 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4
2024-12-03T13:11:20,693 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4
2024-12-03T13:11:20,694 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 964427a12e751e568278ebaf291689c4
2024-12-03T13:11:20,696 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-03T13:11:20,696 INFO [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 964427a12e751e568278ebaf291689c4; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=785201, jitterRate=-0.0015664845705032349}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-03T13:11:20,697 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 964427a12e751e568278ebaf291689c4:
2024-12-03T13:11:20,698 INFO [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4., pid=11, masterSystemTime=1733231480685
2024-12-03T13:11:20,700 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4.
2024-12-03T13:11:20,700 INFO [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4.
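The WALSplitUtil line above records the max-sequence-id marker: a file under recovered.edits/ whose name is the sequence id followed by a .seqid suffix (here 1.seqid, hence newMaxSeqId=1, and the region opens at next sequenceid=2). A small parser for that naming convention, assuming only what the log itself shows; the class and method names are illustrative:

```java
public class SeqIdMarker {
  /** Extracts the sequence id from a recovered.edits marker file name like "1.seqid". */
  static long parseMaxSeqId(String fileName) {
    if (!fileName.endsWith(".seqid")) {
      throw new IllegalArgumentException("not a seqid marker: " + fileName);
    }
    return Long.parseLong(fileName.substring(0, fileName.length() - ".seqid".length()));
  }

  public static void main(String[] args) {
    long maxSeqId = parseMaxSeqId("1.seqid"); // from the WALSplitUtil line above
    System.out.println("newMaxSeqId=" + maxSeqId + ", next sequenceid=" + (maxSeqId + 1));
  }
}
```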
2024-12-03T13:11:20,700 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=964427a12e751e568278ebaf291689c4, regionState=OPEN, openSeqNum=2, regionLocation=35946657df09,40599,1733231478697
2024-12-03T13:11:20,703 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10
2024-12-03T13:11:20,703 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 964427a12e751e568278ebaf291689c4, server=35946657df09,40599,1733231478697 in 170 msec
2024-12-03T13:11:20,705 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9
2024-12-03T13:11:20,705 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=964427a12e751e568278ebaf291689c4, ASSIGN in 325 msec
2024-12-03T13:11:20,706 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-03T13:11:20,706 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733231480706"}]},"ts":"1733231480706"}
2024-12-03T13:11:20,708 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta
2024-12-03T13:11:20,717 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION
2024-12-03T13:11:20,718 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testLogRolling in 378 msec
2024-12-03T13:11:20,814 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:11:20,847 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:11:21,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:11:21,596 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-03T13:11:21,597 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:21,597 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:21,598 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:21,598 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:21,598 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:21,598 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:21,620 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:21,621 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:21,621 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:21,621 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:21,622 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:21,622 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:21,625 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:21,626 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:21,626 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:21,630 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:21,815 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:11:21,847 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:11:22,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:11:22,813 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling
2024-12-03T13:11:22,813 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer
2024-12-03T13:11:22,815 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-03T13:11:22,816 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:11:22,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:11:23,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:11:23,816 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:11:23,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:11:24,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:11:24,817 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:11:24,849 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:11:25,062 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-12-03T13:11:25,062 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace'
2024-12-03T13:11:25,063 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling'
2024-12-03T13:11:25,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:11:25,818 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:11:25,850 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:11:26,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:26,819 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:26,850 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:27,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:11:27,819 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:27,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:28,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:28,318 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T13:11:28,319 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T13:11:28,319 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
2024-12-03T13:11:28,318 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-03T13:11:28,319 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:28,319 – 13:11:28,357 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): the same metric-collection warning repeats through 13:11:28,357
2024-12-03T13:11:28,820 / 28,851 / 29,258 / 29,820 / 29,852 / 30,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation repeats for the same three WAL files (MasterData ...1733231279886, ...1733231280409, ...meta.1733231280958.meta), each with the identical "Filesystem closed" stack trace as above
2024-12-03T13:11:30,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45951 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9
2024-12-03T13:11:30,343 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling, procId: 9 completed
2024-12-03T13:11:30,345 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testLogRolling
2024-12-03T13:11:30,345 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4.
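The CREATE-completed and Found-1-regions lines above correspond to a plain admin-client flow. Below is a minimal sketch against the public HBase 2.x client API, assuming a single "info" column family; it illustrates the flow the log records, not the test's own code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;

public class CreateTableFlow {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("TestLogRolling-testLogRolling");
      TableDescriptor td = TableDescriptorBuilder.newBuilder(tn)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
          .build();
      // Blocks until the master's CREATE procedure completes -- the
      // "Operation: CREATE ... procId: N completed" line in the log.
      admin.createTable(td);
      // A freshly created, unsplit table has exactly one region,
      // matching "Found 1 regions for table ...".
      for (RegionInfo ri : admin.getRegions(tn)) {
        System.out.println("firstRegionName=" + ri.getRegionNameAsString());
      }
    }
  }
}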
2024-12-03T13:11:30,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40599 {}] regionserver.HRegion(8581): Flush requested on 964427a12e751e568278ebaf291689c4
2024-12-03T13:11:30,363 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 964427a12e751e568278ebaf291689c4 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-03T13:11:30,382 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/.tmp/info/ffafa6d61dd943dab899a29bd59078f2 is 1080, key is row0001/info:/1733231490349/Put/seqid=0
2024-12-03T13:11:30,397 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40599 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=964427a12e751e568278ebaf291689c4, server=35946657df09,40599,1733231478697
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T13:11:30,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40599 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:56366 deadline: 1733231500397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=964427a12e751e568278ebaf291689c4, server=35946657df09,40599,1733231478697
2024-12-03T13:11:30,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741839_1015 (size=12509)
2024-12-03T13:11:30,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741839_1015 (size=12509)
2024-12-03T13:11:30,410 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/.tmp/info/ffafa6d61dd943dab899a29bd59078f2
2024-12-03T13:11:30,421 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/.tmp/info/ffafa6d61dd943dab899a29bd59078f2 as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/ffafa6d61dd943dab899a29bd59078f2
2024-12-03T13:11:30,431 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/ffafa6d61dd943dab899a29bd59078f2, entries=7, sequenceid=11, filesize=12.2 K
2024-12-03T13:11:30,432 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 964427a12e751e568278ebaf291689c4 in 69ms, sequenceid=11, compaction requested=false
2024-12-03T13:11:30,432 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 964427a12e751e568278ebaf291689c4:
2024-12-03T13:11:30,821 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 (same "Filesystem closed" stack trace as above)
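The RegionTooBusyException above is the region server rejecting writes while the memstore is over its blocking limit and a flush is still in flight; the subsequent lines show the flush completing and the region draining. Below is a minimal sketch of explicit client-side backoff for this exception, assuming a caller-supplied Put and the table name from this log; the retry count and sleep intervals are illustrative, and in practice the HBase client's own retry machinery usually handles this.

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

final class BusyRegionRetry {
  // Retries a put with exponential backoff when the region reports
  // "Over memstore limit", giving the MemStoreFlusher time to drain.
  static void putWithBackoff(Connection conn, Put put) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
      long sleepMs = 100;
      for (int attempt = 0; ; attempt++) {
        try {
          table.put(put);
          return;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) throw e; // give up after a handful of tries
          Thread.sleep(sleepMs);
          sleepMs *= 2; // back off: 100, 200, 400, ... ms
        }
      }
    }
  }
}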
2024-12-03T13:11:30,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 (same "Filesystem closed" stack trace as above)
2024-12-03T13:11:30,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=3 on file=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 after 196174ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor187.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T13:11:31,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta (same "Filesystem closed" stack trace as above)
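The attempt=3 ... after 196174ms line shows the lease-recovery loop reporting its attempt ordinal and total elapsed time as recoverLease keeps failing against the closed filesystem. Below is a minimal sketch of that loop shape, assuming a caller-supplied recoverLease callable; the 1-second pause and the timeout handling are illustrative, not HBase's actual schedule.

import java.util.concurrent.Callable;

// Illustrative retry loop: reports each failed attempt with its ordinal and the
// elapsed time, like "attempt=3 on file=... after 196174ms" in the log above.
final class LeaseRecoveryLoop {
  static boolean recoverWithRetries(Callable<Boolean> recoverLease,
                                    String file, long timeoutMs) throws InterruptedException {
    long start = System.currentTimeMillis();
    for (int attempt = 0; ; attempt++) {
      long elapsed = System.currentTimeMillis() - start;
      if (elapsed > timeoutMs) {
        return false; // give up; the caller decides what to do with the WAL
      }
      try {
        if (recoverLease.call()) {
          return true; // lease recovered, file is closed
        }
      } catch (Exception e) {
        System.err.printf("attempt=%d on file=%s after %dms%n", attempt, file, elapsed);
      }
      Thread.sleep(1000); // pause before the next probe (interval is illustrative)
    }
  }
}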
2024-12-03T13:11:31,822 – 13:11:36,825 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation retries continue roughly once per second for the same three WAL files (MasterData ...1733231279886, ...1733231280409, ...meta.1733231280958.meta), every attempt failing with the identical InvocationTargetException caused by java.io.IOException: Filesystem closed
2024-12-03T13:11:36,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:37,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:37,826 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:37,857 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:38,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:11:38,826 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:38,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:39,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:39,827 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:39,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:40,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
2024-12-03T13:11:40,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40599 {}] regionserver.HRegion(8581): Flush requested on 964427a12e751e568278ebaf291689c4
2024-12-03T13:11:40,401 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 964427a12e751e568278ebaf291689c4 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB
2024-12-03T13:11:40,407 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/.tmp/info/657d57df4afc4dc887fe20b3ba9bca8c is 1080, key is row0008/info:/1733231490364/Put/seqid=0
2024-12-03T13:11:40,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741840_1016 (size=29761)
2024-12-03T13:11:40,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741840_1016 (size=29761)
2024-12-03T13:11:40,412 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/.tmp/info/657d57df4afc4dc887fe20b3ba9bca8c
2024-12-03T13:11:40,419 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/.tmp/info/657d57df4afc4dc887fe20b3ba9bca8c as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/657d57df4afc4dc887fe20b3ba9bca8c
2024-12-03T13:11:40,425 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/657d57df4afc4dc887fe20b3ba9bca8c, entries=23, sequenceid=37, filesize=29.1 K
2024-12-03T13:11:40,426 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 964427a12e751e568278ebaf291689c4 in 25ms, sequenceid=37, compaction requested=false
2024-12-03T13:11:40,426 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 964427a12e751e568278ebaf291689c4:
2024-12-03T13:11:40,426 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=41.3 K, sizeToCheck=16.0 K
2024-12-03T13:11:40,426 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-03T13:11:40,427 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/657d57df4afc4dc887fe20b3ba9bca8c because midkey is the same as first or last row
2024-12-03T13:11:40,827 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:11:40,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:11:41,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:11:41,828 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:11:41,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:11:42,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:11:42,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40599 {}] regionserver.HRegion(8581): Flush requested on 964427a12e751e568278ebaf291689c4
2024-12-03T13:11:42,410 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 964427a12e751e568278ebaf291689c4 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-03T13:11:42,414 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/.tmp/info/6b9d7d08ed3448dd884084f5993c1705 is 1080, key is row0031/info:/1733231500402/Put/seqid=0
2024-12-03T13:11:42,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741841_1017 (size=12509)
2024-12-03T13:11:42,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741841_1017 (size=12509)
2024-12-03T13:11:42,420 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/.tmp/info/6b9d7d08ed3448dd884084f5993c1705
2024-12-03T13:11:42,427 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/.tmp/info/6b9d7d08ed3448dd884084f5993c1705 as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/6b9d7d08ed3448dd884084f5993c1705
2024-12-03T13:11:42,433 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/6b9d7d08ed3448dd884084f5993c1705, entries=7, sequenceid=47, filesize=12.2 K
2024-12-03T13:11:42,434 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 964427a12e751e568278ebaf291689c4 in 24ms, sequenceid=47, compaction requested=true
2024-12-03T13:11:42,434 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 964427a12e751e568278ebaf291689c4:
2024-12-03T13:11:42,434 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=53.5 K, sizeToCheck=16.0 K
2024-12-03T13:11:42,434 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-03T13:11:42,434 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/657d57df4afc4dc887fe20b3ba9bca8c because midkey is the same as first or last row
2024-12-03T13:11:42,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 964427a12e751e568278ebaf291689c4:info, priority=-2147483648, current under compaction store size is 1
2024-12-03T13:11:42,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-03T13:11:42,435 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-03T13:11:42,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40599 {}] regionserver.HRegion(8581): Flush requested on 964427a12e751e568278ebaf291689c4
2024-12-03T13:11:42,436 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-03T13:11:42,436 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HStore(1540): 964427a12e751e568278ebaf291689c4/info is initiating minor compaction (all files)
2024-12-03T13:11:42,436 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 964427a12e751e568278ebaf291689c4 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB
2024-12-03T13:11:42,436 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 964427a12e751e568278ebaf291689c4/info in TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4.
2024-12-03T13:11:42,436 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/ffafa6d61dd943dab899a29bd59078f2, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/657d57df4afc4dc887fe20b3ba9bca8c, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/6b9d7d08ed3448dd884084f5993c1705] into tmpdir=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/.tmp, totalSize=53.5 K
2024-12-03T13:11:42,437 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.Compactor(224): Compacting ffafa6d61dd943dab899a29bd59078f2, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733231490349
2024-12-03T13:11:42,438 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.Compactor(224): Compacting 657d57df4afc4dc887fe20b3ba9bca8c, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733231490364
2024-12-03T13:11:42,439 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6b9d7d08ed3448dd884084f5993c1705, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733231500402
2024-12-03T13:11:42,441 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/.tmp/info/811e933ee7cb458f8cb540fee93ec6af is 1080, key is row0038/info:/1733231502410/Put/seqid=0
2024-12-03T13:11:42,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741842_1018 (size=21141)
2024-12-03T13:11:42,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741842_1018 (size=21141)
2024-12-03T13:11:42,453 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/.tmp/info/811e933ee7cb458f8cb540fee93ec6af
2024-12-03T13:11:42,460 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 964427a12e751e568278ebaf291689c4#info#compaction#41 average throughput is 12.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-03T13:11:42,461 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/.tmp/info/378fd2853d194eb794749cb2e20e790b is 1080, key is row0001/info:/1733231490349/Put/seqid=0
2024-12-03T13:11:42,465 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/.tmp/info/811e933ee7cb458f8cb540fee93ec6af as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/811e933ee7cb458f8cb540fee93ec6af
2024-12-03T13:11:42,472 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/811e933ee7cb458f8cb540fee93ec6af, entries=15, sequenceid=65, filesize=20.6 K
2024-12-03T13:11:42,473 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=12.61 KB/12912 for 964427a12e751e568278ebaf291689c4 in 36ms, sequenceid=65, compaction requested=false
2024-12-03T13:11:42,473 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 964427a12e751e568278ebaf291689c4:
2024-12-03T13:11:42,473 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=74.1 K, sizeToCheck=16.0 K
2024-12-03T13:11:42,473 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-03T13:11:42,473 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/657d57df4afc4dc887fe20b3ba9bca8c because midkey is the same as first or last row
2024-12-03T13:11:42,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741843_1019 (size=44978)
2024-12-03T13:11:42,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741843_1019 (size=44978)
2024-12-03T13:11:42,483 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/.tmp/info/378fd2853d194eb794749cb2e20e790b as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/378fd2853d194eb794749cb2e20e790b
2024-12-03T13:11:42,489 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 964427a12e751e568278ebaf291689c4/info of 964427a12e751e568278ebaf291689c4 into 378fd2853d194eb794749cb2e20e790b(size=43.9 K), total size for store is 64.6 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-03T13:11:42,489 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 964427a12e751e568278ebaf291689c4:
2024-12-03T13:11:42,489 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4., storeName=964427a12e751e568278ebaf291689c4/info, priority=13, startTime=1733231502434; duration=0sec
2024-12-03T13:11:42,489 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=64.6 K, sizeToCheck=16.0 K
2024-12-03T13:11:42,489 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-03T13:11:42,489 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/378fd2853d194eb794749cb2e20e790b because midkey is the same as first or last row
2024-12-03T13:11:42,489 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-03T13:11:42,489 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 964427a12e751e568278ebaf291689c4:info
2024-12-03T13:11:42,829 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:42,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:11:43,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:43,829 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:43,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:44,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:44,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40599 {}] regionserver.HRegion(8581): Flush requested on 964427a12e751e568278ebaf291689c4 2024-12-03T13:11:44,460 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 964427a12e751e568278ebaf291689c4 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-12-03T13:11:44,465 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/.tmp/info/762986242a81492c9a44561004623f8c is 1080, key is row0053/info:/1733231502437/Put/seqid=0 2024-12-03T13:11:44,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741844_1020 (size=18987) 2024-12-03T13:11:44,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741844_1020 (size=18987) 2024-12-03T13:11:44,500 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40599 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=964427a12e751e568278ebaf291689c4, server=35946657df09,40599,1733231478697 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T13:11:44,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40599 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:56366 deadline: 1733231514499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=964427a12e751e568278ebaf291689c4, server=35946657df09,40599,1733231478697
2024-12-03T13:11:44,830 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:11:44,861 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:11:44,883 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/.tmp/info/762986242a81492c9a44561004623f8c
2024-12-03T13:11:44,888 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/.tmp/info/762986242a81492c9a44561004623f8c as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/762986242a81492c9a44561004623f8c
2024-12-03T13:11:44,893 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/762986242a81492c9a44561004623f8c, entries=13, sequenceid=82, filesize=18.5 K
2024-12-03T13:11:44,894 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=16.81 KB/17216 for 964427a12e751e568278ebaf291689c4 in 434ms, sequenceid=82, compaction requested=true
2024-12-03T13:11:44,894 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 964427a12e751e568278ebaf291689c4:
2024-12-03T13:11:44,894 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=83.1 K, sizeToCheck=16.0 K
2024-12-03T13:11:44,894 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-03T13:11:44,894 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/378fd2853d194eb794749cb2e20e790b because midkey is the same as first or last row
2024-12-03T13:11:44,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 964427a12e751e568278ebaf291689c4:info, priority=-2147483648, current under compaction store size is 1
2024-12-03T13:11:44,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-03T13:11:44,894 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-03T13:11:44,895 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85106 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-03T13:11:44,895 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HStore(1540): 964427a12e751e568278ebaf291689c4/info is initiating minor compaction (all files)
2024-12-03T13:11:44,895 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 964427a12e751e568278ebaf291689c4/info in TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4.
2024-12-03T13:11:44,896 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/378fd2853d194eb794749cb2e20e790b, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/811e933ee7cb458f8cb540fee93ec6af, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/762986242a81492c9a44561004623f8c] into tmpdir=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/.tmp, totalSize=83.1 K
2024-12-03T13:11:44,896 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.Compactor(224): Compacting 378fd2853d194eb794749cb2e20e790b, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733231490349
2024-12-03T13:11:44,896 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.Compactor(224): Compacting 811e933ee7cb458f8cb540fee93ec6af, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=65, earliestPutTs=1733231502410
2024-12-03T13:11:44,897 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.Compactor(224): Compacting 762986242a81492c9a44561004623f8c, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733231502437
2024-12-03T13:11:44,908 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 964427a12e751e568278ebaf291689c4#info#compaction#43 average throughput is 33.35 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-03T13:11:44,909 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/.tmp/info/9d05414febdf448883d835159f30617a is 1080, key is row0001/info:/1733231490349/Put/seqid=0
2024-12-03T13:11:44,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741845_1021 (size=75378)
2024-12-03T13:11:44,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741845_1021 (size=75378)
2024-12-03T13:11:44,918 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/.tmp/info/9d05414febdf448883d835159f30617a as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/9d05414febdf448883d835159f30617a
2024-12-03T13:11:44,923 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 964427a12e751e568278ebaf291689c4/info of 964427a12e751e568278ebaf291689c4 into 9d05414febdf448883d835159f30617a(size=73.6 K), total size for store is 73.6 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-03T13:11:44,923 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 964427a12e751e568278ebaf291689c4:
2024-12-03T13:11:44,923 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4., storeName=964427a12e751e568278ebaf291689c4/info, priority=13, startTime=1733231504894; duration=0sec
2024-12-03T13:11:44,924 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=73.6 K, sizeToCheck=16.0 K
2024-12-03T13:11:44,924 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-03T13:11:44,924 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-03T13:11:44,924 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-03T13:11:44,924 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 964427a12e751e568278ebaf291689c4:info
2024-12-03T13:11:44,925 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45951 {}] assignment.AssignmentManager(1346): Split request from 35946657df09,40599,1733231478697, parent={ENCODED => 964427a12e751e568278ebaf291689c4, NAME => 'TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062
2024-12-03T13:11:44,930 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45951 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=35946657df09,40599,1733231478697
2024-12-03T13:11:44,933 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45951 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=964427a12e751e568278ebaf291689c4, daughterA=ab36b049a4b895c7ad524e3515c6427f, daughterB=2eded9fa458b851aa73417ed6a3398a1
2024-12-03T13:11:44,934 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=12, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=964427a12e751e568278ebaf291689c4, daughterA=ab36b049a4b895c7ad524e3515c6427f, daughterB=2eded9fa458b851aa73417ed6a3398a1
2024-12-03T13:11:44,934 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=12, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=964427a12e751e568278ebaf291689c4, daughterA=ab36b049a4b895c7ad524e3515c6427f, daughterB=2eded9fa458b851aa73417ed6a3398a1
2024-12-03T13:11:44,934 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=12, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=964427a12e751e568278ebaf291689c4, daughterA=ab36b049a4b895c7ad524e3515c6427f, daughterB=2eded9fa458b851aa73417ed6a3398a1
2024-12-03T13:11:44,939 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=964427a12e751e568278ebaf291689c4, UNASSIGN}]
2024-12-03T13:11:44,940 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=964427a12e751e568278ebaf291689c4, UNASSIGN
2024-12-03T13:11:44,940 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=964427a12e751e568278ebaf291689c4, regionState=CLOSING, regionLocation=35946657df09,40599,1733231478697
2024-12-03T13:11:44,941 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false
2024-12-03T13:11:44,942 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE; CloseRegionProcedure 964427a12e751e568278ebaf291689c4, server=35946657df09,40599,1733231478697}]
2024-12-03T13:11:45,096 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 35946657df09,40599,1733231478697
2024-12-03T13:11:45,098 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] handler.UnassignRegionHandler(124): Close 964427a12e751e568278ebaf291689c4
2024-12-03T13:11:45,098 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] handler.UnassignRegionHandler(138): Unassign region: split region: true: evictCache: true
2024-12-03T13:11:45,098 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1681): Closing 964427a12e751e568278ebaf291689c4, disabling compactions & flushes
2024-12-03T13:11:45,099 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4.
2024-12-03T13:11:45,099 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4.
2024-12-03T13:11:45,099 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4. after waiting 0 ms
2024-12-03T13:11:45,099 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4.
2024-12-03T13:11:45,099 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(2837): Flushing 964427a12e751e568278ebaf291689c4 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB
2024-12-03T13:11:45,103 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/.tmp/info/d3cac76e72164ca3a5820b293bb4ca4a is 1080, key is row0066/info:/1733231504461/Put/seqid=0
2024-12-03T13:11:45,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741846_1022 (size=22222)
2024-12-03T13:11:45,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741846_1022 (size=22222)
2024-12-03T13:11:45,108 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=102 (bloomFilter=true), to=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/.tmp/info/d3cac76e72164ca3a5820b293bb4ca4a
2024-12-03T13:11:45,116 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/.tmp/info/d3cac76e72164ca3a5820b293bb4ca4a as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/d3cac76e72164ca3a5820b293bb4ca4a
2024-12-03T13:11:45,122 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/d3cac76e72164ca3a5820b293bb4ca4a, entries=16, sequenceid=102, filesize=21.7 K
2024-12-03T13:11:45,123 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(3040): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=0 B/0 for 964427a12e751e568278ebaf291689c4 in 24ms, sequenceid=102, compaction requested=false
2024-12-03T13:11:45,123 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/ffafa6d61dd943dab899a29bd59078f2, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/657d57df4afc4dc887fe20b3ba9bca8c, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/378fd2853d194eb794749cb2e20e790b, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/6b9d7d08ed3448dd884084f5993c1705, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/811e933ee7cb458f8cb540fee93ec6af, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/762986242a81492c9a44561004623f8c] to archive
2024-12-03T13:11:45,124 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4.-1 {}] backup.HFileArchiver(360): Archiving compacted files.
2024-12-03T13:11:45,126 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/ffafa6d61dd943dab899a29bd59078f2 to hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/archive/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/ffafa6d61dd943dab899a29bd59078f2
2024-12-03T13:11:45,127 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/657d57df4afc4dc887fe20b3ba9bca8c to hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/archive/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/657d57df4afc4dc887fe20b3ba9bca8c
2024-12-03T13:11:45,128 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/378fd2853d194eb794749cb2e20e790b to hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/archive/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/378fd2853d194eb794749cb2e20e790b
2024-12-03T13:11:45,129 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/6b9d7d08ed3448dd884084f5993c1705 to hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/archive/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/6b9d7d08ed3448dd884084f5993c1705
2024-12-03T13:11:45,130 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/811e933ee7cb458f8cb540fee93ec6af to hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/archive/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/811e933ee7cb458f8cb540fee93ec6af
2024-12-03T13:11:45,131 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/762986242a81492c9a44561004623f8c to hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/archive/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/762986242a81492c9a44561004623f8c
2024-12-03T13:11:45,136 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/recovered.edits/105.seqid, newMaxSeqId=105, maxSeqId=1
2024-12-03T13:11:45,137 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4.
2024-12-03T13:11:45,137 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1635): Region close journal for 964427a12e751e568278ebaf291689c4:
2024-12-03T13:11:45,138 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] handler.UnassignRegionHandler(170): Closed 964427a12e751e568278ebaf291689c4
2024-12-03T13:11:45,139 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=964427a12e751e568278ebaf291689c4, regionState=CLOSED
2024-12-03T13:11:45,141 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=14, resume processing ppid=13
2024-12-03T13:11:45,141 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, ppid=13, state=SUCCESS; CloseRegionProcedure 964427a12e751e568278ebaf291689c4, server=35946657df09,40599,1733231478697 in 198 msec
2024-12-03T13:11:45,143 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12
2024-12-03T13:11:45,143 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=964427a12e751e568278ebaf291689c4, UNASSIGN in 202 msec
2024-12-03T13:11:45,153 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:11:45,154 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=12 splitting 2 storefiles, region=964427a12e751e568278ebaf291689c4, threads=2
2024-12-03T13:11:45,155 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=12 splitting started for store file: hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/9d05414febdf448883d835159f30617a for region: 964427a12e751e568278ebaf291689c4
2024-12-03T13:11:45,155 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=12 splitting started for store file: hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/d3cac76e72164ca3a5820b293bb4ca4a for region: 964427a12e751e568278ebaf291689c4
2024-12-03T13:11:45,164 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/d3cac76e72164ca3a5820b293bb4ca4a, top=true
2024-12-03T13:11:45,171 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/TestLogRolling-testLogRolling=964427a12e751e568278ebaf291689c4-d3cac76e72164ca3a5820b293bb4ca4a for child: 2eded9fa458b851aa73417ed6a3398a1, parent: 964427a12e751e568278ebaf291689c4
2024-12-03T13:11:45,171 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=12 splitting complete for store file: hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/d3cac76e72164ca3a5820b293bb4ca4a for region: 964427a12e751e568278ebaf291689c4
2024-12-03T13:11:45,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741847_1023 (size=27)
2024-12-03T13:11:45,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741847_1023 (size=27)
2024-12-03T13:11:45,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741848_1024 (size=27)
2024-12-03T13:11:45,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741848_1024 (size=27)
2024-12-03T13:11:45,182 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=12 splitting complete for store file: hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/9d05414febdf448883d835159f30617a for region: 964427a12e751e568278ebaf291689c4
2024-12-03T13:11:45,182 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=12 split storefiles for region 964427a12e751e568278ebaf291689c4 Daughter A: [hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/ab36b049a4b895c7ad524e3515c6427f/info/9d05414febdf448883d835159f30617a.964427a12e751e568278ebaf291689c4] storefiles, Daughter B: [hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/9d05414febdf448883d835159f30617a.964427a12e751e568278ebaf291689c4, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/TestLogRolling-testLogRolling=964427a12e751e568278ebaf291689c4-d3cac76e72164ca3a5820b293bb4ca4a] storefiles.
2024-12-03T13:11:45,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741849_1025 (size=71)
2024-12-03T13:11:45,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741849_1025 (size=71)
2024-12-03T13:11:45,190 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:11:45,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741850_1026 (size=71)
2024-12-03T13:11:45,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741850_1026 (size=71)
2024-12-03T13:11:45,202 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:11:45,212 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/ab36b049a4b895c7ad524e3515c6427f/recovered.edits/105.seqid, newMaxSeqId=105, maxSeqId=-1
2024-12-03T13:11:45,214 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/recovered.edits/105.seqid, newMaxSeqId=105, maxSeqId=-1
2024-12-03T13:11:45,216 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733231505216"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1733231505216"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1733231505216"}]},"ts":"1733231505216"}
2024-12-03T13:11:45,216 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733231504930.ab36b049a4b895c7ad524e3515c6427f.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733231505216"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733231505216"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733231505216"}]},"ts":"1733231505216"}
2024-12-03T13:11:45,216 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733231505216"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733231505216"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733231505216"}]},"ts":"1733231505216"}
2024-12-03T13:11:45,260 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40599 {}] regionserver.HRegion(8581): Flush requested on 1588230740
2024-12-03T13:11:45,261 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all.
2024-12-03T13:11:45,261 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=4.75 KB heapSize=8.29 KB
2024-12-03T13:11:45,265 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ab36b049a4b895c7ad524e3515c6427f, ASSIGN}, {pid=16, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=2eded9fa458b851aa73417ed6a3398a1, ASSIGN}]
2024-12-03T13:11:45,266 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=16, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=2eded9fa458b851aa73417ed6a3398a1, ASSIGN
2024-12-03T13:11:45,266 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=15, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ab36b049a4b895c7ad524e3515c6427f, ASSIGN
2024-12-03T13:11:45,267 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=16, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=2eded9fa458b851aa73417ed6a3398a1, ASSIGN; state=SPLITTING_NEW, location=35946657df09,40599,1733231478697; forceNewPlan=false, retain=false
2024-12-03T13:11:45,267 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=15, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ab36b049a4b895c7ad524e3515c6427f, ASSIGN; state=SPLITTING_NEW, location=35946657df09,40599,1733231478697; forceNewPlan=false, retain=false
2024-12-03T13:11:45,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:11:45,277 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/hbase/meta/1588230740/.tmp/info/894069ae27484d1a9de18518af361b40 is 193, key is TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1./info:regioninfo/1733231505216/Put/seqid=0
2024-12-03T13:11:45,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741851_1027 (size=9423)
2024-12-03T13:11:45,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741851_1027 (size=9423)
2024-12-03T13:11:45,281 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.54 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/hbase/meta/1588230740/.tmp/info/894069ae27484d1a9de18518af361b40
2024-12-03T13:11:45,299 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/hbase/meta/1588230740/.tmp/table/241de5873fef4298a32b7a548ffa36c7 is 65, key is TestLogRolling-testLogRolling/table:state/1733231480706/Put/seqid=0
2024-12-03T13:11:45,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741852_1028 (size=5412)
2024-12-03T13:11:45,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741852_1028 (size=5412)
2024-12-03T13:11:45,304 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=216 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/hbase/meta/1588230740/.tmp/table/241de5873fef4298a32b7a548ffa36c7
2024-12-03T13:11:45,309 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/hbase/meta/1588230740/.tmp/info/894069ae27484d1a9de18518af361b40 as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/hbase/meta/1588230740/info/894069ae27484d1a9de18518af361b40
2024-12-03T13:11:45,315 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/hbase/meta/1588230740/info/894069ae27484d1a9de18518af361b40, entries=29, sequenceid=17, filesize=9.2 K
2024-12-03T13:11:45,316 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/hbase/meta/1588230740/.tmp/table/241de5873fef4298a32b7a548ffa36c7 as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/hbase/meta/1588230740/table/241de5873fef4298a32b7a548ffa36c7
2024-12-03T13:11:45,323 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/hbase/meta/1588230740/table/241de5873fef4298a32b7a548ffa36c7, entries=4, sequenceid=17, filesize=5.3 K
2024-12-03T13:11:45,324 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~4.75 KB/4869, heapSize ~8.01 KB/8200, currentSize=0 B/0 for 1588230740 in 63ms, sequenceid=17, compaction requested=false
2024-12-03T13:11:45,324 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1588230740:
2024-12-03T13:11:45,417 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=16 updating hbase:meta row=2eded9fa458b851aa73417ed6a3398a1, regionState=OPENING, regionLocation=35946657df09,40599,1733231478697
2024-12-03T13:11:45,417 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=15 updating hbase:meta row=ab36b049a4b895c7ad524e3515c6427f, regionState=OPENING, regionLocation=35946657df09,40599,1733231478697
2024-12-03T13:11:45,419 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=15, state=RUNNABLE; OpenRegionProcedure ab36b049a4b895c7ad524e3515c6427f, server=35946657df09,40599,1733231478697}]
2024-12-03T13:11:45,420 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=18, ppid=16, state=RUNNABLE; OpenRegionProcedure 2eded9fa458b851aa73417ed6a3398a1, server=35946657df09,40599,1733231478697}]
2024-12-03T13:11:45,571 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 35946657df09,40599,1733231478697
2024-12-03T13:11:45,574 INFO [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.
2024-12-03T13:11:45,574 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(7285): Opening region: {ENCODED => 2eded9fa458b851aa73417ed6a3398a1, NAME => 'TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.', STARTKEY => 'row0062', ENDKEY => ''} 2024-12-03T13:11:45,574 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 2eded9fa458b851aa73417ed6a3398a1 2024-12-03T13:11:45,574 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T13:11:45,575 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(7327): checking encryption for 2eded9fa458b851aa73417ed6a3398a1 2024-12-03T13:11:45,575 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(7330): checking classloading for 2eded9fa458b851aa73417ed6a3398a1 2024-12-03T13:11:45,576 INFO [StoreOpener-2eded9fa458b851aa73417ed6a3398a1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 2eded9fa458b851aa73417ed6a3398a1 2024-12-03T13:11:45,577 INFO [StoreOpener-2eded9fa458b851aa73417ed6a3398a1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2eded9fa458b851aa73417ed6a3398a1 columnFamilyName info 2024-12-03T13:11:45,577 DEBUG [StoreOpener-2eded9fa458b851aa73417ed6a3398a1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T13:11:45,588 DEBUG [StoreOpener-2eded9fa458b851aa73417ed6a3398a1-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/9d05414febdf448883d835159f30617a.964427a12e751e568278ebaf291689c4->hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/9d05414febdf448883d835159f30617a-top 2024-12-03T13:11:45,593 DEBUG [StoreOpener-2eded9fa458b851aa73417ed6a3398a1-1 {}] regionserver.StoreEngine(277): loaded 
hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/TestLogRolling-testLogRolling=964427a12e751e568278ebaf291689c4-d3cac76e72164ca3a5820b293bb4ca4a 2024-12-03T13:11:45,593 INFO [StoreOpener-2eded9fa458b851aa73417ed6a3398a1-1 {}] regionserver.HStore(327): Store=2eded9fa458b851aa73417ed6a3398a1/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T13:11:45,594 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1 2024-12-03T13:11:45,595 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1 2024-12-03T13:11:45,598 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(1085): writing seq id for 2eded9fa458b851aa73417ed6a3398a1 2024-12-03T13:11:45,599 INFO [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(1102): Opened 2eded9fa458b851aa73417ed6a3398a1; next sequenceid=106; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=791924, jitterRate=0.006984218955039978}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T13:11:45,599 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(1001): Region open journal for 2eded9fa458b851aa73417ed6a3398a1: 2024-12-03T13:11:45,600 INFO [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1., pid=18, masterSystemTime=1733231505570 2024-12-03T13:11:45,600 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.CompactSplit(403): Add compact mark for store 2eded9fa458b851aa73417ed6a3398a1:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T13:11:45,600 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T13:11:45,600 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-03T13:11:45,601 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HStore(1526): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1. 
2024-12-03T13:11:45,601 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HStore(1540): 2eded9fa458b851aa73417ed6a3398a1/info is initiating minor compaction (all files) 2024-12-03T13:11:45,601 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2eded9fa458b851aa73417ed6a3398a1/info in TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1. 2024-12-03T13:11:45,602 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/9d05414febdf448883d835159f30617a.964427a12e751e568278ebaf291689c4->hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/9d05414febdf448883d835159f30617a-top, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/TestLogRolling-testLogRolling=964427a12e751e568278ebaf291689c4-d3cac76e72164ca3a5820b293bb4ca4a] into tmpdir=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp, totalSize=95.3 K 2024-12-03T13:11:45,602 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1. 2024-12-03T13:11:45,602 INFO [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1. 2024-12-03T13:11:45,602 INFO [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRolling,,1733231504930.ab36b049a4b895c7ad524e3515c6427f. 
2024-12-03T13:11:45,602 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7285): Opening region: {ENCODED => ab36b049a4b895c7ad524e3515c6427f, NAME => 'TestLogRolling-testLogRolling,,1733231504930.ab36b049a4b895c7ad524e3515c6427f.', STARTKEY => '', ENDKEY => 'row0062'} 2024-12-03T13:11:45,602 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=16 updating hbase:meta row=2eded9fa458b851aa73417ed6a3398a1, regionState=OPEN, openSeqNum=106, regionLocation=35946657df09,40599,1733231478697 2024-12-03T13:11:45,602 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling ab36b049a4b895c7ad524e3515c6427f 2024-12-03T13:11:45,602 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRolling,,1733231504930.ab36b049a4b895c7ad524e3515c6427f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T13:11:45,602 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7327): checking encryption for ab36b049a4b895c7ad524e3515c6427f 2024-12-03T13:11:45,602 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7330): checking classloading for ab36b049a4b895c7ad524e3515c6427f 2024-12-03T13:11:45,603 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9d05414febdf448883d835159f30617a.964427a12e751e568278ebaf291689c4, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1733231490349 2024-12-03T13:11:45,604 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.Compactor(224): Compacting TestLogRolling-testLogRolling=964427a12e751e568278ebaf291689c4-d3cac76e72164ca3a5820b293bb4ca4a, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1733231504461 2024-12-03T13:11:45,604 INFO [StoreOpener-ab36b049a4b895c7ad524e3515c6427f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region ab36b049a4b895c7ad524e3515c6427f 2024-12-03T13:11:45,605 INFO [StoreOpener-ab36b049a4b895c7ad524e3515c6427f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ab36b049a4b895c7ad524e3515c6427f columnFamilyName info 2024-12-03T13:11:45,605 DEBUG [StoreOpener-ab36b049a4b895c7ad524e3515c6427f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T13:11:45,606 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=18, resume processing ppid=16 2024-12-03T13:11:45,606 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, ppid=16, state=SUCCESS; OpenRegionProcedure 2eded9fa458b851aa73417ed6a3398a1, server=35946657df09,40599,1733231478697 in 185 msec 2024-12-03T13:11:45,607 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=2eded9fa458b851aa73417ed6a3398a1, ASSIGN in 341 msec 2024-12-03T13:11:45,616 DEBUG [StoreOpener-ab36b049a4b895c7ad524e3515c6427f-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/ab36b049a4b895c7ad524e3515c6427f/info/9d05414febdf448883d835159f30617a.964427a12e751e568278ebaf291689c4->hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/9d05414febdf448883d835159f30617a-bottom 2024-12-03T13:11:45,616 INFO [StoreOpener-ab36b049a4b895c7ad524e3515c6427f-1 {}] regionserver.HStore(327): Store=ab36b049a4b895c7ad524e3515c6427f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T13:11:45,617 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/ab36b049a4b895c7ad524e3515c6427f 2024-12-03T13:11:45,619 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/ab36b049a4b895c7ad524e3515c6427f 2024-12-03T13:11:45,621 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1085): writing seq id for ab36b049a4b895c7ad524e3515c6427f 2024-12-03T13:11:45,622 INFO [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1102): Opened ab36b049a4b895c7ad524e3515c6427f; next sequenceid=106; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=842610, jitterRate=0.07143506407737732}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T13:11:45,622 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1001): Region open journal for ab36b049a4b895c7ad524e3515c6427f: 2024-12-03T13:11:45,623 INFO [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRolling,,1733231504930.ab36b049a4b895c7ad524e3515c6427f., pid=17, masterSystemTime=1733231505570 2024-12-03T13:11:45,623 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.CompactSplit(403): Add compact mark for store ab36b049a4b895c7ad524e3515c6427f:info, 
priority=-2147483648, current under compaction store size is 2 2024-12-03T13:11:45,623 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T13:11:45,623 DEBUG [RS:0;35946657df09:40599-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-12-03T13:11:45,624 INFO [RS:0;35946657df09:40599-longCompactions-0 {}] regionserver.HStore(1526): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1733231504930.ab36b049a4b895c7ad524e3515c6427f. 2024-12-03T13:11:45,624 DEBUG [RS:0;35946657df09:40599-longCompactions-0 {}] regionserver.HStore(1540): ab36b049a4b895c7ad524e3515c6427f/info is initiating minor compaction (all files) 2024-12-03T13:11:45,624 INFO [RS:0;35946657df09:40599-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ab36b049a4b895c7ad524e3515c6427f/info in TestLogRolling-testLogRolling,,1733231504930.ab36b049a4b895c7ad524e3515c6427f. 2024-12-03T13:11:45,624 INFO [RS:0;35946657df09:40599-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/ab36b049a4b895c7ad524e3515c6427f/info/9d05414febdf448883d835159f30617a.964427a12e751e568278ebaf291689c4->hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/9d05414febdf448883d835159f30617a-bottom] into tmpdir=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/ab36b049a4b895c7ad524e3515c6427f/.tmp, totalSize=73.6 K 2024-12-03T13:11:45,625 DEBUG [RS:0;35946657df09:40599-longCompactions-0 {}] compactions.Compactor(224): Compacting 9d05414febdf448883d835159f30617a.964427a12e751e568278ebaf291689c4, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733231490349 2024-12-03T13:11:45,625 DEBUG [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRolling,,1733231504930.ab36b049a4b895c7ad524e3515c6427f. 2024-12-03T13:11:45,626 INFO [RS_OPEN_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRolling,,1733231504930.ab36b049a4b895c7ad524e3515c6427f. 2024-12-03T13:11:45,626 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=15 updating hbase:meta row=ab36b049a4b895c7ad524e3515c6427f, regionState=OPEN, openSeqNum=106, regionLocation=35946657df09,40599,1733231478697 2024-12-03T13:11:45,626 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2eded9fa458b851aa73417ed6a3398a1#info#compaction#47 average throughput is 20.52 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T13:11:45,627 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/41b578e5bb814188acc76e9b3b0fa908 is 1080, key is row0062/info:/1733231502454/Put/seqid=0 2024-12-03T13:11:45,630 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=15 2024-12-03T13:11:45,630 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=15, state=SUCCESS; OpenRegionProcedure ab36b049a4b895c7ad524e3515c6427f, server=35946657df09,40599,1733231478697 in 208 msec 2024-12-03T13:11:45,632 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=12 2024-12-03T13:11:45,632 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=ab36b049a4b895c7ad524e3515c6427f, ASSIGN in 365 msec 2024-12-03T13:11:45,633 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=964427a12e751e568278ebaf291689c4, daughterA=ab36b049a4b895c7ad524e3515c6427f, daughterB=2eded9fa458b851aa73417ed6a3398a1 in 702 msec 2024-12-03T13:11:45,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741853_1029 (size=26696) 2024-12-03T13:11:45,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741853_1029 (size=26696) 2024-12-03T13:11:45,643 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/41b578e5bb814188acc76e9b3b0fa908 as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/41b578e5bb814188acc76e9b3b0fa908 2024-12-03T13:11:45,650 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 2 (all) file(s) in 2eded9fa458b851aa73417ed6a3398a1/info of 2eded9fa458b851aa73417ed6a3398a1 into 41b578e5bb814188acc76e9b3b0fa908(size=26.1 K), total size for store is 26.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T13:11:45,650 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2eded9fa458b851aa73417ed6a3398a1: 2024-12-03T13:11:45,650 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1., storeName=2eded9fa458b851aa73417ed6a3398a1/info, priority=14, startTime=1733231505600; duration=0sec 2024-12-03T13:11:45,650 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T13:11:45,650 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2eded9fa458b851aa73417ed6a3398a1:info 2024-12-03T13:11:45,651 INFO [RS:0;35946657df09:40599-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ab36b049a4b895c7ad524e3515c6427f#info#compaction#48 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T13:11:45,651 DEBUG [RS:0;35946657df09:40599-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/ab36b049a4b895c7ad524e3515c6427f/.tmp/info/3d962c9d33684f8995040c02dc0b74a7 is 1080, key is row0001/info:/1733231490349/Put/seqid=0 2024-12-03T13:11:45,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741854_1030 (size=70862) 2024-12-03T13:11:45,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741854_1030 (size=70862) 2024-12-03T13:11:45,665 DEBUG [RS:0;35946657df09:40599-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/ab36b049a4b895c7ad524e3515c6427f/.tmp/info/3d962c9d33684f8995040c02dc0b74a7 as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/ab36b049a4b895c7ad524e3515c6427f/info/3d962c9d33684f8995040c02dc0b74a7 2024-12-03T13:11:45,672 INFO [RS:0;35946657df09:40599-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 1 (all) file(s) in ab36b049a4b895c7ad524e3515c6427f/info of ab36b049a4b895c7ad524e3515c6427f into 3d962c9d33684f8995040c02dc0b74a7(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T13:11:45,672 DEBUG [RS:0;35946657df09:40599-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ab36b049a4b895c7ad524e3515c6427f: 2024-12-03T13:11:45,672 INFO [RS:0;35946657df09:40599-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733231504930.ab36b049a4b895c7ad524e3515c6427f., storeName=ab36b049a4b895c7ad524e3515c6427f/info, priority=15, startTime=1733231505623; duration=0sec 2024-12-03T13:11:45,672 DEBUG [RS:0;35946657df09:40599-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T13:11:45,672 DEBUG [RS:0;35946657df09:40599-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ab36b049a4b895c7ad524e3515c6427f:info 2024-12-03T13:11:45,830 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:45,861 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:46,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:46,831 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:46,831 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=3 on file=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 after 196670ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor187.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:11:46,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:11:47,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:47,831 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:47,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:48,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:48,543 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-03T13:11:48,832 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:48,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:49,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
2024-12-03T13:11:49,833 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
2024-12-03T13:11:49,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
2024-12-03T13:11:50,137 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:50,137 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:50,137 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:50,137 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:50,138 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:50,138 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:50,163 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:50,163 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:50,163 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:50,163 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:50,163 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:50,164 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:50,168 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:50,168 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:50,168 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:50,170 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:50,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
2024-12-03T13:11:50,679 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-03T13:11:50,680 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:50,680 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:50,680 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:50,680 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:50,680 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:50,680 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:50,696 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:50,696 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:50,696 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:50,697 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:50,697 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:50,697 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:50,700 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:50,700 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:50,700 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:50,702 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:11:50,833 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
2024-12-03T13:11:50,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
2024-12-03T13:11:51,043 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-03T13:11:51,044 INFO [RS-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57542, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService
2024-12-03T13:11:51,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
2024-12-03T13:11:51,834 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
2024-12-03T13:11:51,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
2024-12-03T13:11:52,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
2024-12-03T13:11:52,834 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
2024-12-03T13:11:52,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
2024-12-03T13:11:53,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
2024-12-03T13:11:53,835 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
2024-12-03T13:11:53,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
2024-12-03T13:11:54,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
2024-12-03T13:11:54,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40599 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:56366 deadline: 1733231524511, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733231480338.964427a12e751e568278ebaf291689c4. is not online on 35946657df09,40599,1733231478697
2024-12-03T13:11:54,835 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
2024-12-03T13:11:54,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:55,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:55,836 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:55,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:56,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:56,836 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:11:56,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
[The same Close-WAL-Writer-0 WARN, with a stack trace identical to the one above, repeats at roughly one-second intervals per file, cycling through the same three WAL files, from 2024-12-03T13:11:57,274 through 2024-12-03T13:12:00,276.]
2024-12-03T13:12:00,402 INFO [master/35946657df09:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-03T13:12:00,402 INFO [master/35946657df09:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
[The identical WARN retry cycle continues for the same three WAL files from 2024-12-03T13:12:00,839 through 2024-12-03T13:12:04,278.]
2024-12-03T13:12:04,645 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1588230740, had cached 0 bytes from a total of 14835
[The identical WARN retry cycle continues for the same three WAL files from 2024-12-03T13:12:04,841 through 2024-12-03T13:12:06,842.]
2024-12-03T13:12:06,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null [stack trace identical to the one above; the captured log is cut off partway through this trace]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:07,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:07,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=3 on file=hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta after 196199ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor187.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T13:12:07,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:12:07,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:08,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:08,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:08,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:09,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:09,844 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:09,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:10,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:12:10,844 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:10,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:11,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:11,845 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:11,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:12,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:12,846 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:12,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:12:13,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:13,846 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:12:14,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:12:14,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-03T13:12:14,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40599 {}] regionserver.HRegion(8581): Flush requested on 2eded9fa458b851aa73417ed6a3398a1
2024-12-03T13:12:14,635 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2eded9fa458b851aa73417ed6a3398a1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-03T13:12:14,642 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/4a6300ea38cd4bcba232c1eefd5a9b9b is 1080, key is row0082/info:/1733231534622/Put/seqid=0
2024-12-03T13:12:14,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741855_1031 (size=12509)
2024-12-03T13:12:14,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741855_1031 (size=12509)
2024-12-03T13:12:14,847 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-03T13:12:15,054 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/4a6300ea38cd4bcba232c1eefd5a9b9b
2024-12-03T13:12:15,061 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/4a6300ea38cd4bcba232c1eefd5a9b9b as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/4a6300ea38cd4bcba232c1eefd5a9b9b
2024-12-03T13:12:15,066 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/4a6300ea38cd4bcba232c1eefd5a9b9b, entries=7, sequenceid=116, filesize=12.2 K
2024-12-03T13:12:15,068 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=8.41 KB/8608 for 2eded9fa458b851aa73417ed6a3398a1 in 432ms, sequenceid=116, compaction requested=false
2024-12-03T13:12:15,068 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2eded9fa458b851aa73417ed6a3398a1:
2024-12-03T13:12:15,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
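The recurring "Failed invocation" WARNs above come from RecoverLeaseFSUtils calling DistributedFileSystem.isFileClosed reflectively (not every FileSystem implementation exposes it); once the test has shut the DFSClient down, the checked "Filesystem closed" IOException surfaces wrapped in an InvocationTargetException, exactly as logged. A minimal sketch of that probe pattern, assuming only a Hadoop client on the classpath; the class name IsFileClosedProbe and its error handling are illustrative, not the HBase implementation:

```java
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical re-creation of the reflective isFileClosed probe whose
// failures produce the WARN entries above.
final class IsFileClosedProbe {
  static Boolean probe(FileSystem fs, Path p) {
    try {
      // isFileClosed(Path) is public on DistributedFileSystem, so a
      // reflective lookup succeeds there and fails elsewhere.
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, p);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return null; // this FileSystem does not expose the method
    } catch (InvocationTargetException e) {
      // With an already-closed DFSClient the cause is
      // java.io.IOException: Filesystem closed, as in the log.
      if (e.getCause() instanceof IOException) {
        return null; // unknown state; the caller keeps retrying
      }
      throw new RuntimeException(e.getCause());
    }
  }
}
```

Because the probe returns "unknown" rather than failing hard, the Close-WAL-Writer thread simply retries on an interval, which is why the same warning repeats roughly once per second per WAL file for the rest of this section.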
2024-12-03T13:12:15,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-03T13:12:15,847 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-03T13:12:16,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-03T13:12:16,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-03T13:12:16,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40599 {}] regionserver.HRegion(8581): Flush requested on 2eded9fa458b851aa73417ed6a3398a1
2024-12-03T13:12:16,657 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2eded9fa458b851aa73417ed6a3398a1 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB
2024-12-03T13:12:16,661 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/8c08d5bd51094b89a4eefab23d1d0c95 is 1080, key is row0089/info:/1733231534636/Put/seqid=0
2024-12-03T13:12:16,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741856_1032 (size=14663)
2024-12-03T13:12:16,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741856_1032 (size=14663)
2024-12-03T13:12:16,670 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/8c08d5bd51094b89a4eefab23d1d0c95
2024-12-03T13:12:16,678 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/8c08d5bd51094b89a4eefab23d1d0c95 as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/8c08d5bd51094b89a4eefab23d1d0c95
2024-12-03T13:12:16,685 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/8c08d5bd51094b89a4eefab23d1d0c95, entries=9, sequenceid=128, filesize=14.3 K
2024-12-03T13:12:16,686 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=16.81 KB/17216 for 2eded9fa458b851aa73417ed6a3398a1 in 30ms, sequenceid=128, compaction requested=true
2024-12-03T13:12:16,686 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2eded9fa458b851aa73417ed6a3398a1:
2024-12-03T13:12:16,686 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2eded9fa458b851aa73417ed6a3398a1:info, priority=-2147483648, current under compaction store size is 1
2024-12-03T13:12:16,686 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-03T13:12:16,686 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-03T13:12:16,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40599 {}] regionserver.HRegion(8581): Flush requested on 2eded9fa458b851aa73417ed6a3398a1
2024-12-03T13:12:16,687 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2eded9fa458b851aa73417ed6a3398a1 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB
2024-12-03T13:12:16,688 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 53868 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-03T13:12:16,688 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HStore(1540): 2eded9fa458b851aa73417ed6a3398a1/info is initiating minor compaction (all files)
2024-12-03T13:12:16,688 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2eded9fa458b851aa73417ed6a3398a1/info in TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.
2024-12-03T13:12:16,688 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/41b578e5bb814188acc76e9b3b0fa908, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/4a6300ea38cd4bcba232c1eefd5a9b9b, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/8c08d5bd51094b89a4eefab23d1d0c95] into tmpdir=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp, totalSize=52.6 K
2024-12-03T13:12:16,689 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.Compactor(224): Compacting 41b578e5bb814188acc76e9b3b0fa908, keycount=20, bloomtype=ROW, size=26.1 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1733231502454
2024-12-03T13:12:16,689 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a6300ea38cd4bcba232c1eefd5a9b9b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733231534622
2024-12-03T13:12:16,690 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8c08d5bd51094b89a4eefab23d1d0c95, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1733231534636
2024-12-03T13:12:16,693 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/f1ad0f9025a24e62b3c5bf6c84c4afca is 1080, key is row0098/info:/1733231536657/Put/seqid=0
2024-12-03T13:12:16,705 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2eded9fa458b851aa73417ed6a3398a1#info#compaction#52 average throughput is 18.47 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-03T13:12:16,705 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/15e4ad4806534d3ebbd033cf7328d103 is 1080, key is row0062/info:/1733231502454/Put/seqid=0
2024-12-03T13:12:16,706 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40599 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=2eded9fa458b851aa73417ed6a3398a1, server=35946657df09,40599,1733231478697
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T13:12:16,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40599 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:56366 deadline: 1733231546705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=2eded9fa458b851aa73417ed6a3398a1, server=35946657df09,40599,1733231478697
2024-12-03T13:12:16,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741857_1033 (size=24394)
2024-12-03T13:12:16,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741857_1033 (size=24394)
2024-12-03T13:12:16,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741858_1034 (size=44066)
2024-12-03T13:12:16,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741858_1034 (size=44066)
2024-12-03T13:12:16,742 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/15e4ad4806534d3ebbd033cf7328d103 as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/15e4ad4806534d3ebbd033cf7328d103
2024-12-03T13:12:16,748 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2eded9fa458b851aa73417ed6a3398a1/info of 2eded9fa458b851aa73417ed6a3398a1 into 15e4ad4806534d3ebbd033cf7328d103(size=43.0 K), total size for store is 43.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-03T13:12:16,748 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2eded9fa458b851aa73417ed6a3398a1:
2024-12-03T13:12:16,748 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1., storeName=2eded9fa458b851aa73417ed6a3398a1/info, priority=13, startTime=1733231536686; duration=0sec
2024-12-03T13:12:16,748 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-03T13:12:16,748 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2eded9fa458b851aa73417ed6a3398a1:info
2024-12-03T13:12:16,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
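The RegionTooBusyException at 13:12:16,706 above is HBase's write back-pressure: puts arrive faster than the memstore can be flushed, so the region rejects the mutation until flushes and the just-completed compaction drain it. A hedged sketch of how a client writer might react, assuming a standard hbase-client setup and that the exception reaches the caller unwrapped; the table name and row key are taken from the log, while the retry policy, value, and class name BusyRegionRetry are illustrative:

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical client-side handling of the "Over memstore limit"
// rejection seen above: back off and retry while the region drains.
public final class BusyRegionRetry {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(
             TableName.valueOf("TestLogRolling-testLogRolling"))) {
      Put put = new Put(Bytes.toBytes("row0098"));
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes(""),
          Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          return; // write accepted
        } catch (RegionTooBusyException e) {
          // Region is over its memstore limit; give flushes a chance.
          Thread.sleep(backoffMs);
          backoffMs *= 2; // exponential backoff
        }
      }
      throw new IOException("region still too busy after retries");
    }
  }
}
```

In practice the stock HBase client already retries this exception internally; an explicit loop like this only matters when retries are disabled or exhausted, as they are for the test RPC logged at callId 143.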
2024-12-03T13:12:17,112 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=149 (bloomFilter=true), to=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/f1ad0f9025a24e62b3c5bf6c84c4afca
2024-12-03T13:12:17,117 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/f1ad0f9025a24e62b3c5bf6c84c4afca as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/f1ad0f9025a24e62b3c5bf6c84c4afca
2024-12-03T13:12:17,122 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/f1ad0f9025a24e62b3c5bf6c84c4afca, entries=18, sequenceid=149, filesize=23.8 K
2024-12-03T13:12:17,123 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=11.56 KB/11836 for 2eded9fa458b851aa73417ed6a3398a1 in 435ms, sequenceid=149, compaction requested=false
2024-12-03T13:12:17,123 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2eded9fa458b851aa73417ed6a3398a1:
2024-12-03T13:12:17,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-03T13:12:17,286 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-03T13:12:17,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-03T13:12:18,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-03T13:12:18,286 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-03T13:12:18,543 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-03T13:12:18,849 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
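The NoSuchFieldException DEBUG at 13:12:18,543 above records the test utility's reflective workaround failing gracefully: the private threadGroup field it patches inside Hadoop's FsDatasetAsyncDiskService is gone in newer Hadoop releases (HBASE-27595), so the lookup fails and the fixer just logs and moves on. A small sketch of that guarded-lookup pattern, assuming nothing beyond the JDK; the class name ThreadGroupFieldLookup is illustrative:

```java
import java.lang.reflect.Field;

// Hypothetical illustration of the guarded reflective field access
// behind the NoSuchFieldException DEBUG above.
final class ThreadGroupFieldLookup {
  static Field find(Class<?> cls) {
    try {
      Field f = cls.getDeclaredField("threadGroup");
      f.setAccessible(true); // private field, so open it up first
      return f;
    } catch (NoSuchFieldException e) {
      // Hadoop > 3.2.3 / 3.3.4: the field no longer exists, so the
      // caller skips the fix instead of failing the test.
      return null;
    }
  }
}
```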
2024-12-03T13:12:19,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-03T13:12:19,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-03T13:12:19,788 DEBUG [master/35946657df09:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region c3c4997abc2047f0af08dcbeaa6cd4b5 changed from -1.0 to 0.0, refreshing cache
2024-12-03T13:12:19,850 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-03T13:12:20,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-03T13:12:20,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-03T13:12:20,850 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-03T13:12:21,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-03T13:12:21,288 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-03T13:12:21,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-03T13:12:22,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
Caused by: java.io.IOException: Filesystem closed
2024-12-03T13:12:22,289 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:22,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:12:23,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:23,289 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:23,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:24,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:24,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:24,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:25,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:25,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:12:25,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:26,276 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:26,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:12:26,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40599 {}] regionserver.HRegion(8581): Flush requested on 2eded9fa458b851aa73417ed6a3398a1 2024-12-03T13:12:26,735 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2eded9fa458b851aa73417ed6a3398a1 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-03T13:12:26,739 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/6543555a2b854707872764c34a852c0e is 1080, key is row0116/info:/1733231536688/Put/seqid=0 2024-12-03T13:12:26,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741859_1035 (size=17906) 2024-12-03T13:12:26,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741859_1035 (size=17906) 2024-12-03T13:12:26,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:12:27,148 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=165 (bloomFilter=true), to=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/6543555a2b854707872764c34a852c0e 2024-12-03T13:12:27,155 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/6543555a2b854707872764c34a852c0e as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/6543555a2b854707872764c34a852c0e 2024-12-03T13:12:27,160 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/6543555a2b854707872764c34a852c0e, entries=12, sequenceid=165, filesize=17.5 K 2024-12-03T13:12:27,161 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=1.05 KB/1076 for 2eded9fa458b851aa73417ed6a3398a1 in 426ms, sequenceid=165, compaction requested=true 2024-12-03T13:12:27,161 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2eded9fa458b851aa73417ed6a3398a1: 2024-12-03T13:12:27,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2eded9fa458b851aa73417ed6a3398a1:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T13:12:27,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T13:12:27,161 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T13:12:27,162 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 86366 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T13:12:27,163 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HStore(1540): 2eded9fa458b851aa73417ed6a3398a1/info is initiating minor compaction (all files) 2024-12-03T13:12:27,163 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2eded9fa458b851aa73417ed6a3398a1/info in TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1. 
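The ExploringCompactionPolicy(116) record above reports that the size-ratio ("exploring") selection accepted all three eligible store files after considering one permutation. What follows is a minimal, illustrative sketch of that ratio test only, not the actual HBase implementation: the class name RatioCheckSketch, the main() harness, and the individual byte counts (chosen to sum to the logged total of 86366) are assumptions for illustration. Only the rule itself reflects the policy's behavior: a candidate set is "in ratio" when no single file exceeds the combined size of the other files multiplied by hbase.hstore.compaction.ratio (1.2 by default).

// Illustrative sketch of the size-ratio ("exploring") check, assuming
// simplified types; the real logic lives in
// org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy.
import java.util.List;

public class RatioCheckSketch {

    // A candidate set is "in ratio" if no single file dwarfs the rest:
    // every file must be no larger than (sum of the other files) * ratio.
    static boolean filesInRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Approximate sizes of the three store files logged above
        // (43.0 K, ~23.8 K, 17.5 K), summing to the logged 86366 bytes.
        List<Long> candidate = List.of(44032L, 24428L, 17906L);
        double ratio = 1.2; // hbase.hstore.compaction.ratio default
        System.out.println("in ratio: " + filesInRatio(candidate, ratio));
    }
}

Run against these sizes the check passes (the largest file, 44032 bytes, is below (86366 - 44032) * 1.2 ≈ 50801), which is consistent with the log selecting all 3 files for the minor compaction that follows.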
2024-12-03T13:12:27,163 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/15e4ad4806534d3ebbd033cf7328d103, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/f1ad0f9025a24e62b3c5bf6c84c4afca, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/6543555a2b854707872764c34a852c0e] into tmpdir=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp, totalSize=84.3 K 2024-12-03T13:12:27,163 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.Compactor(224): Compacting 15e4ad4806534d3ebbd033cf7328d103, keycount=36, bloomtype=ROW, size=43.0 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1733231502454 2024-12-03T13:12:27,163 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.Compactor(224): Compacting f1ad0f9025a24e62b3c5bf6c84c4afca, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=149, earliestPutTs=1733231536657 2024-12-03T13:12:27,164 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6543555a2b854707872764c34a852c0e, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1733231536688 2024-12-03T13:12:27,189 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2eded9fa458b851aa73417ed6a3398a1#info#compaction#54 average throughput is 22.58 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T13:12:27,189 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/05c4d541cd354f06a62c1da69d2eaaa3 is 1080, key is row0062/info:/1733231502454/Put/seqid=0 2024-12-03T13:12:27,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741860_1036 (size=76649) 2024-12-03T13:12:27,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741860_1036 (size=76649) 2024-12-03T13:12:27,212 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/05c4d541cd354f06a62c1da69d2eaaa3 as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/05c4d541cd354f06a62c1da69d2eaaa3 2024-12-03T13:12:27,218 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2eded9fa458b851aa73417ed6a3398a1/info of 2eded9fa458b851aa73417ed6a3398a1 into 05c4d541cd354f06a62c1da69d2eaaa3(size=74.9 K), total size for store is 74.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T13:12:27,218 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2eded9fa458b851aa73417ed6a3398a1: 2024-12-03T13:12:27,218 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1., storeName=2eded9fa458b851aa73417ed6a3398a1/info, priority=13, startTime=1733231547161; duration=0sec 2024-12-03T13:12:27,218 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T13:12:27,218 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2eded9fa458b851aa73417ed6a3398a1:info 2024-12-03T13:12:27,276 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:27,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:12:27,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:28,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:12:28,292 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:12:28,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40599 {}] regionserver.HRegion(8581): Flush requested on 2eded9fa458b851aa73417ed6a3398a1
2024-12-03T13:12:28,745 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2eded9fa458b851aa73417ed6a3398a1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-03T13:12:28,750 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/3d4676a9f0a042c39d9864ae50e0a167 is 1080, key is row0128/info:/1733231546736/Put/seqid=0
2024-12-03T13:12:28,771 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40599 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=2eded9fa458b851aa73417ed6a3398a1, server=35946657df09,40599,1733231478697
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T13:12:28,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40599 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:56366 deadline: 1733231558770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=2eded9fa458b851aa73417ed6a3398a1, server=35946657df09,40599,1733231478697
2024-12-03T13:12:28,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741861_1037 (size=12516)
2024-12-03T13:12:28,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741861_1037 (size=12516)
2024-12-03T13:12:28,774 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/3d4676a9f0a042c39d9864ae50e0a167
2024-12-03T13:12:28,786 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/3d4676a9f0a042c39d9864ae50e0a167 as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/3d4676a9f0a042c39d9864ae50e0a167
2024-12-03T13:12:28,793 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/3d4676a9f0a042c39d9864ae50e0a167, entries=7, sequenceid=176, filesize=12.2 K
2024-12-03T13:12:28,796 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 2eded9fa458b851aa73417ed6a3398a1 in 52ms, sequenceid=176, compaction requested=false
2024-12-03T13:12:28,796 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2eded9fa458b851aa73417ed6a3398a1:
2024-12-03T13:12:28,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
[stack trace identical to the 13:12:28,292 entry above; omitted]
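For context on the RegionTooBusyException above: HRegion.checkResources rejects the Mutate once the region's memstore passes its blocking limit (here 32.0 K under this test's tiny flush settings), and the exception is meant to be retriable; the HBase client already retries internally, and an application can add its own backoff on top while the flush drains. Below is a minimal sketch of such handling, assuming an HBase 2.x client on the classpath. The table, row and family names are taken from the log; the class name BackoffPut, the qualifier/value and the retry policy are illustrative, not part of this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPut {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
                Put put = new Put(Bytes.toBytes("row0128"))
                    .addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("v"));
                long backoffMs = 100;
                for (int attempt = 1; ; attempt++) {
                    try {
                        table.put(put);   // server may reject with RegionTooBusyException
                        break;            // success
                    } catch (RegionTooBusyException e) {
                        if (attempt == 5) throw e;  // give up after a few tries
                        Thread.sleep(backoffMs);    // let the flush drain the memstore
                        backoffMs *= 2;             // exponential backoff
                    }
                }
            }
        }
    }

In the log, that is exactly what happens without any extra handling: the flush that was requested at 13:12:28,744 completes 52 ms later and the memstore pressure clears on its own.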
[The same "Failed invocation" WARN from util.RecoverLeaseFSUtils(258) recurs on Close-WAL-Writer-0 at 13:12:29,277, 13:12:29,293, 13:12:29,855, 13:12:30,278 and 13:12:30,293, cycling over the same three WAL files with stack traces identical to the 13:12:28,292 entry above; the duplicate traces are omitted.]
2024-12-03T13:12:30,575 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 2eded9fa458b851aa73417ed6a3398a1, had cached 0 bytes from a total of 89165
2024-12-03T13:12:30,602 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region ab36b049a4b895c7ad524e3515c6427f, had cached 0 bytes from a total of 70862
[Identical "Failed invocation" WARNs follow at 13:12:30,856 and 13:12:31,279; duplicate traces omitted.]
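For context on the recurring WARN: RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) reaches DistributedFileSystem.isFileClosed through reflection, since that method is not on the generic FileSystem API, and because the test's DFSClient has already been shut down, every probe fails as an InvocationTargetException wrapping IOException("Filesystem closed") from DFSClient.checkOpen. Below is a minimal sketch of that reflective probe pattern, assuming a Hadoop 3.x client on the classpath; it mirrors the shape visible in the stack trace, not the utility's exact code, and the class and method names (IsFileClosedProbe, probe) are illustrative.

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class IsFileClosedProbe {
        // Asks the filesystem whether the file's lease is released. isFileClosed(Path)
        // exists on DistributedFileSystem but not on FileSystem, hence the reflective
        // lookup, which matches the Method.invoke frames in the trace above.
        static boolean probe(FileSystem fs, Path path) {
            Method m;
            try {
                m = fs.getClass().getMethod("isFileClosed", Path.class);
            } catch (NoSuchMethodException e) {
                return false; // not an HDFS filesystem; caller polls recoverLease instead
            }
            try {
                return (Boolean) m.invoke(fs, path);
            } catch (IllegalAccessException | InvocationTargetException e) {
                // In this log, e is an InvocationTargetException whose cause is
                // java.io.IOException("Filesystem closed") from DFSClient.checkOpen(),
                // so the probe keeps failing and the utility retries once per second.
                return false;
            }
        }
    }

Returning false on failure is what produces the once-per-second retry cadence seen below: the lease-recovery loop treats an unanswerable probe as "not closed yet" and tries again.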
[The cycle of identical "Failed invocation" WARNs continues on Close-WAL-Writer-0 roughly once per second per file from 2024-12-03T13:12:31,294 through 2024-12-03T13:12:38,283, rotating over 35946657df09%2C40851%2C1733231279554.1733231279886 (MasterData), 35946657df09%2C42823%2C1733231279724.1733231280409 and 35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta; each carries the same InvocationTargetException / "Filesystem closed" stack trace, omitted here.]
2024-12-03T13:12:38,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:38,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40599 {}] regionserver.HRegion(8581): Flush requested on 2eded9fa458b851aa73417ed6a3398a1 2024-12-03T13:12:38,777 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2eded9fa458b851aa73417ed6a3398a1 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-12-03T13:12:38,781 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/b4148a42e90445f18916534dcb286f51 is 1080, key is row0135/info:/1733231548745/Put/seqid=0 2024-12-03T13:12:38,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741862_1038 (size=29784) 2024-12-03T13:12:38,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741862_1038 (size=29784) 2024-12-03T13:12:38,788 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/b4148a42e90445f18916534dcb286f51 2024-12-03T13:12:38,794 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/b4148a42e90445f18916534dcb286f51 as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/b4148a42e90445f18916534dcb286f51 2024-12-03T13:12:38,800 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/b4148a42e90445f18916534dcb286f51, entries=23, sequenceid=202, filesize=29.1 K 2024-12-03T13:12:38,801 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=3.15 KB/3228 for 2eded9fa458b851aa73417ed6a3398a1 in 24ms, sequenceid=202, compaction requested=true 2024-12-03T13:12:38,801 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2eded9fa458b851aa73417ed6a3398a1: 2024-12-03T13:12:38,801 DEBUG [MemStoreFlusher.0 {}] 
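Note on the repeated WARN above: the frames show RecoverLeaseFSUtils reaching isFileClosed through java.lang.reflect.Method.invoke, so the DFS client's IOException("Filesystem closed") arrives wrapped in an InvocationTargetException whose own message is null (hence the logged ": null"). A minimal, self-contained Java sketch of that wrapping, using a hypothetical ClosedFs stand-in rather than HBase/HDFS code:

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    // Hypothetical stand-in for a DFS client whose filesystem is already closed.
    class ClosedFs {
        public boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed");
        }
    }

    public class ReflectiveInvokeDemo {
        public static void main(String[] args) throws Exception {
            // Resolve the method reflectively, mirroring the Method.invoke
            // frame in the traces above.
            Method m = ClosedFs.class.getMethod("isFileClosed", String.class);
            try {
                m.invoke(new ClosedFs(), "/some/wal");
            } catch (InvocationTargetException e) {
                // The wrapper carries no message of its own; the real failure
                // is the cause, matching the "Caused by:" section in the log.
                System.out.println(e.getMessage()); // null
                System.out.println(e.getCause());   // java.io.IOException: Filesystem closed
            }
        }
    }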
2024-12-03T13:12:38,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40599 {}] regionserver.HRegion(8581): Flush requested on 2eded9fa458b851aa73417ed6a3398a1
2024-12-03T13:12:38,777 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2eded9fa458b851aa73417ed6a3398a1 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB
2024-12-03T13:12:38,781 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/b4148a42e90445f18916534dcb286f51 is 1080, key is row0135/info:/1733231548745/Put/seqid=0
2024-12-03T13:12:38,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741862_1038 (size=29784)
2024-12-03T13:12:38,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741862_1038 (size=29784)
2024-12-03T13:12:38,788 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/b4148a42e90445f18916534dcb286f51
2024-12-03T13:12:38,794 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/b4148a42e90445f18916534dcb286f51 as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/b4148a42e90445f18916534dcb286f51
2024-12-03T13:12:38,800 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/b4148a42e90445f18916534dcb286f51, entries=23, sequenceid=202, filesize=29.1 K
2024-12-03T13:12:38,801 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=3.15 KB/3228 for 2eded9fa458b851aa73417ed6a3398a1 in 24ms, sequenceid=202, compaction requested=true
2024-12-03T13:12:38,801 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2eded9fa458b851aa73417ed6a3398a1:
2024-12-03T13:12:38,801 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2eded9fa458b851aa73417ed6a3398a1:info, priority=-2147483648, current under compaction store size is 1
2024-12-03T13:12:38,801 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-03T13:12:38,801 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-03T13:12:38,802 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 118949 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-03T13:12:38,802 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HStore(1540): 2eded9fa458b851aa73417ed6a3398a1/info is initiating minor compaction (all files)
2024-12-03T13:12:38,802 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2eded9fa458b851aa73417ed6a3398a1/info in TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.
2024-12-03T13:12:38,803 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/05c4d541cd354f06a62c1da69d2eaaa3, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/3d4676a9f0a042c39d9864ae50e0a167, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/b4148a42e90445f18916534dcb286f51] into tmpdir=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp, totalSize=116.2 K
2024-12-03T13:12:38,803 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.Compactor(224): Compacting 05c4d541cd354f06a62c1da69d2eaaa3, keycount=66, bloomtype=ROW, size=74.9 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1733231502454
2024-12-03T13:12:38,803 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3d4676a9f0a042c39d9864ae50e0a167, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1733231546736
2024-12-03T13:12:38,804 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.Compactor(224): Compacting b4148a42e90445f18916534dcb286f51, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1733231548745
2024-12-03T13:12:38,815 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2eded9fa458b851aa73417ed6a3398a1#info#compaction#57 average throughput is 49.26 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-03T13:12:38,816 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/ac9023a28a664c4f87af10374914f984 is 1080, key is row0062/info:/1733231502454/Put/seqid=0
2024-12-03T13:12:38,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741863_1039 (size=109099)
2024-12-03T13:12:38,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741863_1039 (size=109099)
2024-12-03T13:12:38,830 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/ac9023a28a664c4f87af10374914f984 as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/ac9023a28a664c4f87af10374914f984
2024-12-03T13:12:38,849 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2eded9fa458b851aa73417ed6a3398a1/info of 2eded9fa458b851aa73417ed6a3398a1 into ac9023a28a664c4f87af10374914f984(size=106.5 K), total size for store is 106.5 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-03T13:12:38,849 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2eded9fa458b851aa73417ed6a3398a1:
2024-12-03T13:12:38,850 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1., storeName=2eded9fa458b851aa73417ed6a3398a1/info, priority=13, startTime=1733231558801; duration=0sec
2024-12-03T13:12:38,850 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-03T13:12:38,850 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2eded9fa458b851aa73417ed6a3398a1:info
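Note on units in the compaction records above: the policy line reports raw bytes ("3 files of size 118949") while the store lines report rounded binary kilobytes ("totalSize=116.2 K", output "size=106.5 K", block size=109099). A quick check in plain Java, using only values taken verbatim from the lines above, confirms the two notations agree:

    // Verifies that HBase's rounded "K" sizes are binary kilobytes of the raw byte counts.
    public class SizeCheck {
        static String kb(long bytes) {
            return String.format("%.1f K", bytes / 1024.0);
        }

        public static void main(String[] args) {
            System.out.println(kb(118_949L)); // 116.2 K -> matches "totalSize=116.2 K" for the 3 input files
            System.out.println(kb(109_099L)); // 106.5 K -> matches the compacted file "ac9023...(size=106.5 K)"
        }
    }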
2024-12-03T13:12:38,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null [stack trace identical to the first occurrence above]
2024-12-03T13:12:39,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null [stack trace identical to the first occurrence above]
2024-12-03T13:12:39,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null [stack trace identical to the first occurrence above]
2024-12-03T13:12:39,861 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null [stack trace identical to the first occurrence above]
2024-12-03T13:12:40,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null [stack trace identical to the first occurrence above]
2024-12-03T13:12:40,299 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null [stack trace identical to the first occurrence above]
2024-12-03T13:12:40,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40599 {}] regionserver.HRegion(8581): Flush requested on 2eded9fa458b851aa73417ed6a3398a1
2024-12-03T13:12:40,786 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2eded9fa458b851aa73417ed6a3398a1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-03T13:12:40,790 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/5c81c11a340b42bf96d2f218663c44ef is 1080, key is row0158/info:/1733231558778/Put/seqid=0
2024-12-03T13:12:40,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741864_1040 (size=12516)
2024-12-03T13:12:40,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741864_1040 (size=12516)
2024-12-03T13:12:40,800 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/5c81c11a340b42bf96d2f218663c44ef
2024-12-03T13:12:40,807 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/5c81c11a340b42bf96d2f218663c44ef as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/5c81c11a340b42bf96d2f218663c44ef
2024-12-03T13:12:40,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40599 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=2eded9fa458b851aa73417ed6a3398a1, server=35946657df09,40599,1733231478697
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T13:12:40,814 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/5c81c11a340b42bf96d2f218663c44ef, entries=7, sequenceid=213, filesize=12.2 K
2024-12-03T13:12:40,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40599 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:56366 deadline: 1733231570814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=2eded9fa458b851aa73417ed6a3398a1, server=35946657df09,40599,1733231478697
2024-12-03T13:12:40,815 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 2eded9fa458b851aa73417ed6a3398a1 in 29ms, sequenceid=213, compaction requested=false
2024-12-03T13:12:40,815 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2eded9fa458b851aa73417ed6a3398a1:
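Note on the RegionTooBusyException above: it is write back-pressure, not a failure of the flush itself. HRegion.checkResources rejects the incoming Mutate RPC because the region's memstore is over its blocking limit (32.0 K in this test configuration), and the client is handed a deadline to retry after a flush frees space. A toy Java sketch of that check, with hypothetical names and a plain IOException standing in for RegionTooBusyException (not HBase's implementation):

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicLong;

    public class MemstorePressureDemo {
        static final long BLOCKING_LIMIT = 32 * 1024;          // "Over memstore limit=32.0 K"
        static final AtomicLong memstoreSize = new AtomicLong();

        // Reject the write instead of queueing it when over the limit.
        static void checkResources() throws IOException {
            if (memstoreSize.get() > BLOCKING_LIMIT) {
                throw new IOException("Over memstore limit=32.0 K");
            }
        }

        public static void main(String[] args) throws IOException {
            memstoreSize.set(23_672);   // currentSize from the flush record above
            checkResources();           // passes: 23,672 < 32,768
            memstoreSize.addAndGet(16_384);
            try {
                checkResources();       // 40,056 > 32,768: rejected, client must back off
            } catch (IOException e) {
                System.out.println("rejected: " + e.getMessage());
            }
        }
    }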
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:41,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:41,300 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:41,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:42,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:12:42,300 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:42,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:43,286 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:43,301 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:43,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:44,286 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:44,301 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
2024-12-03T13:12:44,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null (Caused by: java.io.IOException: Filesystem closed; stack trace identical to the first occurrence above)
2024-12-03T13:12:45,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null (Caused by: java.io.IOException: Filesystem closed; stack trace identical to the first occurrence above)
2024-12-03T13:12:45,302 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null (Caused by: java.io.IOException: Filesystem closed; stack trace identical to the first occurrence above)
2024-12-03T13:12:45,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null (Caused by: java.io.IOException: Filesystem closed; stack trace identical to the first occurrence above)
2024-12-03T13:12:46,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null (Caused by: java.io.IOException: Filesystem closed; stack trace identical to the first occurrence above)
2024-12-03T13:12:46,302 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null (Caused by: java.io.IOException: Filesystem closed; stack trace identical to the first occurrence above)
2024-12-03T13:12:46,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null (Caused by: java.io.IOException: Filesystem closed; stack trace identical to the first occurrence above)
2024-12-03T13:12:47,288 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null (Caused by: java.io.IOException: Filesystem closed; stack trace identical to the first occurrence above)
2024-12-03T13:12:47,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null (Caused by: java.io.IOException: Filesystem closed; stack trace identical to the first occurrence above)
2024-12-03T13:12:47,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null (Caused by: java.io.IOException: Filesystem closed; stack trace identical to the first occurrence above)
2024-12-03T13:12:48,289 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null (Caused by: java.io.IOException: Filesystem closed; stack trace identical to the first occurrence above)
2024-12-03T13:12:48,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null (Caused by: java.io.IOException: Filesystem closed; stack trace identical to the first occurrence above)
2024-12-03T13:12:48,543 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; it might be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details.
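The NoSuchFieldException DEBUG line above comes from the test utility patching a Hadoop-internal object by field name; when a newer Hadoop renames or removes that field ("threadGroup" here), the reflective lookup fails and the fixer skips itself (HBASE-27595). A hedged sketch of that failure mode, with a hypothetical helper name (readPrivateField is not an HBase method):

import java.lang.reflect.Field;

public class PrivateFieldProbe {
  // Read a private field by name; returns null when the field is gone, which is how
  // a renamed or removed Hadoop internal (here "threadGroup") turns into the
  // NoSuchFieldException the fixer logs above and then ignores.
  static Object readPrivateField(Object target, String name) {
    try {
      Field f = target.getClass().getDeclaredField(name);
      f.setAccessible(true);
      return f.get(target);
    } catch (NoSuchFieldException | IllegalAccessException e) {
      return null; // field absent on Hadoop > 3.2.3 / 3.3.4; nothing to fix
    }
  }
}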
2024-12-03T13:12:48,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null (Caused by: java.io.IOException: Filesystem closed; stack trace identical to the first occurrence above)
2024-12-03T13:12:49,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null (Caused by: java.io.IOException: Filesystem closed; stack trace identical to the first occurrence above)
2024-12-03T13:12:49,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null (Caused by: java.io.IOException: Filesystem closed; stack trace identical to the first occurrence above)
2024-12-03T13:12:49,645 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1588230740, had cached 0 bytes from a total of 14835
2024-12-03T13:12:49,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null (Caused by: java.io.IOException: Filesystem closed; stack trace identical to the first occurrence above)
2024-12-03T13:12:50,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null (Caused by: java.io.IOException: Filesystem closed; stack trace identical to the first occurrence above)
2024-12-03T13:12:50,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null (Caused by: java.io.IOException: Filesystem closed; stack trace identical to the first occurrence above)
2024-12-03T13:12:50,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40599 {}] regionserver.HRegion(8581): Flush requested on 2eded9fa458b851aa73417ed6a3398a1
2024-12-03T13:12:50,858 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2eded9fa458b851aa73417ed6a3398a1 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB
2024-12-03T13:12:50,863 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/8b0708a700d048dd89fad25dda17d054 is 1080, key is row0165/info:/1733231560786/Put/seqid=0
2024-12-03T13:12:50,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null (Caused by: java.io.IOException: Filesystem closed; stack trace identical to the first occurrence above)
2024-12-03T13:12:50,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741865_1041 (size=29784)
2024-12-03T13:12:50,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741865_1041 (size=29784)
2024-12-03T13:12:50,869 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/8b0708a700d048dd89fad25dda17d054
2024-12-03T13:12:50,874 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/8b0708a700d048dd89fad25dda17d054 as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/8b0708a700d048dd89fad25dda17d054
2024-12-03T13:12:50,881 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/8b0708a700d048dd89fad25dda17d054, entries=23, sequenceid=239, filesize=29.1 K
2024-12-03T13:12:50,882 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=5.25 KB/5380 for 2eded9fa458b851aa73417ed6a3398a1 in 24ms, sequenceid=239, compaction requested=true
2024-12-03T13:12:50,882 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2eded9fa458b851aa73417ed6a3398a1:
2024-12-03T13:12:50,882 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2eded9fa458b851aa73417ed6a3398a1:info, priority=-2147483648, current under compaction store size is 1
2024-12-03T13:12:50,882 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-03T13:12:50,882 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-03T13:12:50,883 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 151399 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-03T13:12:50,883 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HStore(1540): 2eded9fa458b851aa73417ed6a3398a1/info is initiating minor compaction (all files)
2024-12-03T13:12:50,884 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2eded9fa458b851aa73417ed6a3398a1/info in TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.
2024-12-03T13:12:50,884 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/ac9023a28a664c4f87af10374914f984, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/5c81c11a340b42bf96d2f218663c44ef, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/8b0708a700d048dd89fad25dda17d054] into tmpdir=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp, totalSize=147.9 K
2024-12-03T13:12:50,884 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.Compactor(224): Compacting ac9023a28a664c4f87af10374914f984, keycount=96, bloomtype=ROW, size=106.5 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1733231502454
2024-12-03T13:12:50,884 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5c81c11a340b42bf96d2f218663c44ef, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733231558778
2024-12-03T13:12:50,885 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8b0708a700d048dd89fad25dda17d054, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1733231560786
2024-12-03T13:12:50,899 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2eded9fa458b851aa73417ed6a3398a1#info#compaction#60 average throughput is 32.32 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-03T13:12:50,900 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/fd0f3d9dcedb423ea1e913454f13a95d is 1080, key is row0062/info:/1733231502454/Put/seqid=0
2024-12-03T13:12:50,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741866_1042 (size=141746)
2024-12-03T13:12:50,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741866_1042 (size=141746)
2024-12-03T13:12:51,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null (Caused by: java.io.IOException: Filesystem closed; stack trace identical to the first occurrence above)
2024-12-03T13:12:51,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null (Caused by: java.io.IOException: Filesystem closed; stack trace identical to the first occurrence above)
2024-12-03T13:12:51,321 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/fd0f3d9dcedb423ea1e913454f13a95d as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/fd0f3d9dcedb423ea1e913454f13a95d
2024-12-03T13:12:51,329 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2eded9fa458b851aa73417ed6a3398a1/info of 2eded9fa458b851aa73417ed6a3398a1 into fd0f3d9dcedb423ea1e913454f13a95d(size=138.4 K), total size for store is 138.4 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-03T13:12:51,329 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2eded9fa458b851aa73417ed6a3398a1:
2024-12-03T13:12:51,329 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1., storeName=2eded9fa458b851aa73417ed6a3398a1/info, priority=13, startTime=1733231570882; duration=0sec
2024-12-03T13:12:51,329 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-03T13:12:51,329 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2eded9fa458b851aa73417ed6a3398a1:info
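As a cross-check of the compaction accounting above: the three input store files totalled 151399 bytes (the logged totalSize=147.9 K) and the compacted block blk_1073741866_1042 is 141746 bytes (the logged 138.4 K). A small illustrative check of that arithmetic (a standalone sketch, not HBase code):

public class CompactionSizeCheck {
  public static void main(String[] args) {
    long inputBytes = 151399L;  // totalSize of the three input store files (log above)
    long outputBytes = 141746L; // size of blk_1073741866_1042, the compacted output
    // Prints: inputs = 147.9 K, output = 138.4 K, matching the HStore(1336) line.
    System.out.printf("inputs = %.1f K, output = %.1f K%n",
        inputBytes / 1024.0, outputBytes / 1024.0);
  }
}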
2024-12-03T13:12:52,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null (stack trace identical to 13:12:51,305 above)
2024-12-03T13:12:52,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null (stack trace identical to 13:12:51,305 above)
2024-12-03T13:12:52,868 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null (stack trace identical to 13:12:51,305 above)
2024-12-03T13:12:52,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40599 {}] regionserver.HRegion(8581): Flush requested on 2eded9fa458b851aa73417ed6a3398a1
2024-12-03T13:12:52,869 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2eded9fa458b851aa73417ed6a3398a1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-03T13:12:52,874 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/5a2b10d0810848d8b787436d8dec9f31 is 1080, key is row0188/info:/1733231570859/Put/seqid=0
2024-12-03T13:12:52,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741867_1043 (size=12516)
2024-12-03T13:12:52,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741867_1043 (size=12516)
2024-12-03T13:12:52,906 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/5a2b10d0810848d8b787436d8dec9f31
2024-12-03T13:12:52,917 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/5a2b10d0810848d8b787436d8dec9f31 as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/5a2b10d0810848d8b787436d8dec9f31
2024-12-03T13:12:52,924 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/5a2b10d0810848d8b787436d8dec9f31, entries=7, sequenceid=250, filesize=12.2 K
2024-12-03T13:12:52,927 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=18.91 KB/19368 for 2eded9fa458b851aa73417ed6a3398a1 in 58ms, sequenceid=250, compaction requested=false
2024-12-03T13:12:52,927 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2eded9fa458b851aa73417ed6a3398a1:
2024-12-03T13:12:52,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40599 {}] regionserver.HRegion(8581): Flush requested on 2eded9fa458b851aa73417ed6a3398a1
2024-12-03T13:12:52,929 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2eded9fa458b851aa73417ed6a3398a1 1/1 column families, dataSize=21.02 KB heapSize=22.75 KB
2024-12-03T13:12:52,940 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/6cfb6a909c5345868aafcd494215fbc7 is 1080, key is row0195/info:/1733231572870/Put/seqid=0
2024-12-03T13:12:52,954 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40599 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=2eded9fa458b851aa73417ed6a3398a1, server=35946657df09,40599,1733231478697
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T13:12:52,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40599 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:56366 deadline: 1733231582953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=2eded9fa458b851aa73417ed6a3398a1, server=35946657df09,40599,1733231478697
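The RegionTooBusyException above is thrown from HRegion.checkResources (per its stack frame) when a write arrives while the region's memstore is over its blocking limit. A simplified sketch of that admission check, with hypothetical names; the constants and real behavior live in HRegion:

// Simplified gate: reject writes while the memstore exceeds its blocking size.
class MemStoreGate {
    private final long blockingMemStoreSize; // e.g. 32 KB in this test's configuration
    private long memStoreDataSize;           // bytes currently buffered in the memstore

    MemStoreGate(long blockingMemStoreSize) {
        this.blockingMemStoreSize = blockingMemStoreSize;
    }

    /** Called on every mutation; pushes back until a flush catches up. */
    void checkResources(String regionName) {
        if (memStoreDataSize > blockingMemStoreSize) {
            requestFlush();
            // Stands in for org.apache.hadoop.hbase.RegionTooBusyException.
            throw new IllegalStateException("Over memstore limit="
                + (blockingMemStoreSize / 1024) + " K, regionName=" + regionName);
        }
    }

    void add(long bytes) { memStoreDataSize += bytes; }

    void requestFlush() { /* queue an asynchronous memstore flush */ }
}

The client sees the exception on the Mutate call (the CallRunner DEBUG line above) and is expected to back off and retry once the flush drains the memstore.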
2024-12-03T13:12:53,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741868_1044 (size=26566)
2024-12-03T13:12:53,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741868_1044 (size=26566)
2024-12-03T13:12:53,012 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=21.02 KB at sequenceid=273 (bloomFilter=true), to=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/6cfb6a909c5345868aafcd494215fbc7
2024-12-03T13:12:53,028 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/6cfb6a909c5345868aafcd494215fbc7 as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/6cfb6a909c5345868aafcd494215fbc7
2024-12-03T13:12:53,049 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/6cfb6a909c5345868aafcd494215fbc7, entries=20, sequenceid=273, filesize=25.9 K
2024-12-03T13:12:53,051 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~21.02 KB/21520, heapSize ~22.73 KB/23280, currentSize=9.46 KB/9684 for 2eded9fa458b851aa73417ed6a3398a1 in 121ms, sequenceid=273, compaction requested=true
2024-12-03T13:12:53,051 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2eded9fa458b851aa73417ed6a3398a1:
2024-12-03T13:12:53,051 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2eded9fa458b851aa73417ed6a3398a1:info, priority=-2147483648, current under compaction store size is 1
2024-12-03T13:12:53,051 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-03T13:12:53,053 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-03T13:12:53,055 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 180828 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-03T13:12:53,055 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HStore(1540): 2eded9fa458b851aa73417ed6a3398a1/info is initiating minor compaction (all files)
2024-12-03T13:12:53,055 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2eded9fa458b851aa73417ed6a3398a1/info in TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.
2024-12-03T13:12:53,055 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/fd0f3d9dcedb423ea1e913454f13a95d, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/5a2b10d0810848d8b787436d8dec9f31, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/6cfb6a909c5345868aafcd494215fbc7] into tmpdir=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp, totalSize=176.6 K
2024-12-03T13:12:53,058 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd0f3d9dcedb423ea1e913454f13a95d, keycount=126, bloomtype=ROW, size=138.4 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1733231502454
2024-12-03T13:12:53,060 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5a2b10d0810848d8b787436d8dec9f31, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1733231570859
2024-12-03T13:12:53,060 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6cfb6a909c5345868aafcd494215fbc7, keycount=20, bloomtype=ROW, size=25.9 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1733231572870
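The ExploringCompactionPolicy lines above show the policy weighing permutations of eligible store files and counting how many are "in ratio". A toy version of that ratio test follows (sizes and ratio are illustrative, and the real policy additionally explores start/length permutations and enforces min/max file counts):

// Toy "in ratio" check: a candidate set qualifies when no single file is larger
// than the combined size of the others times the configured ratio.
class RatioCheck {
    static boolean filesInRatio(long[] sizes, double ratio) {
        long total = 0;
        for (long s : sizes) total += s;
        for (long s : sizes) {
            if (s > (total - s) * ratio) return false; // one file dominates the set
        }
        return true;
    }

    public static void main(String[] args) {
        System.out.println(filesInRatio(new long[]{10_000, 12_000, 15_000}, 1.2));  // true
        System.out.println(filesInRatio(new long[]{140_000, 12_000, 26_000}, 1.2)); // false: big file dominates
    }
}

A set dominated by one large file fails the plain ratio test; the exploring policy can still end up compacting such a set under its other rules, so this sketch only illustrates the ratio criterion itself.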
2024-12-03T13:12:53,092 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2eded9fa458b851aa73417ed6a3398a1#info#compaction#63 average throughput is 22.43 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-03T13:12:53,092 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/e30e158a348b47928834d313666594cf is 1080, key is row0062/info:/1733231502454/Put/seqid=0
2024-12-03T13:12:53,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741869_1045 (size=170978)
2024-12-03T13:12:53,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741869_1045 (size=170978)
2024-12-03T13:12:53,162 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/e30e158a348b47928834d313666594cf as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/e30e158a348b47928834d313666594cf
2024-12-03T13:12:53,191 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2eded9fa458b851aa73417ed6a3398a1/info of 2eded9fa458b851aa73417ed6a3398a1 into e30e158a348b47928834d313666594cf(size=167.0 K), total size for store is 167.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-03T13:12:53,192 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2eded9fa458b851aa73417ed6a3398a1:
2024-12-03T13:12:53,192 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1., storeName=2eded9fa458b851aa73417ed6a3398a1/info, priority=13, startTime=1733231573051; duration=0sec
2024-12-03T13:12:53,192 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-03T13:12:53,192 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2eded9fa458b851aa73417ed6a3398a1:info
2024-12-03T13:12:53,292 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null (stack trace identical to 13:12:51,305 above)
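From here the log settles into the same three WAL files being retried on a roughly one-second cadence each (compare the 13:12:51 through 13:13:00 timestamps per file). A schematic of that retry shape, assuming a fixed one-second pause and a hypothetical isClosedProbe; this is not the actual RecoverLeaseFSUtils code:

import java.util.function.Predicate;

class LeaseRecoveryLoop {
    /** Polls until the probe reports the file closed or the deadline passes. */
    static boolean waitUntilClosed(String path, Predicate<String> isClosedProbe,
                                   long timeoutMs) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
            try {
                if (isClosedProbe.test(path)) return true; // lease recovered, file closed
            } catch (RuntimeException e) {
                // Probe failed (e.g. the wrapped "Filesystem closed" above);
                // log a WARN and keep retrying, matching the once-per-second
                // "Failed invocation" entries in this log.
            }
            Thread.sleep(1000L);
        }
        return false; // give up after the timeout
    }
}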
2024-12-03T13:12:53,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null (stack trace identical to 13:12:51,305 above)
2024-12-03T13:12:53,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null (stack trace identical to 13:12:51,305 above)
2024-12-03T13:12:54,292 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null (stack trace identical to 13:12:51,305 above)
2024-12-03T13:12:54,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null (stack trace identical to 13:12:51,305 above)
2024-12-03T13:12:54,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null (stack trace identical to 13:12:51,305 above)
2024-12-03T13:12:55,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null (stack trace identical to 13:12:51,305 above)
2024-12-03T13:12:55,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null (stack trace identical to 13:12:51,305 above)
2024-12-03T13:12:55,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null (stack trace identical to 13:12:51,305 above)
2024-12-03T13:12:56,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null (stack trace identical to 13:12:51,305 above)
2024-12-03T13:12:56,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null (stack trace identical to 13:12:51,305 above)
2024-12-03T13:12:56,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null (stack trace identical to 13:12:51,305 above)
2024-12-03T13:12:57,294 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null (stack trace identical to 13:12:51,305 above)
2024-12-03T13:12:57,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null (stack trace identical to 13:12:51,305 above)
2024-12-03T13:12:57,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null (stack trace identical to 13:12:51,305 above)
2024-12-03T13:12:58,295 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null (stack trace identical to 13:12:51,305 above)
2024-12-03T13:12:58,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null (stack trace identical to 13:12:51,305 above)
2024-12-03T13:12:58,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null (stack trace identical to 13:12:51,305 above)
11 more 2024-12-03T13:12:59,295 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:59,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:12:59,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:13:00,296 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:13:00,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:13:00,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:13:01,296 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:13:01,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T13:13:01,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:13:02,297 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:13:02,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:13:02,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:13:03,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40599 {}] regionserver.HRegion(8581): Flush requested on 2eded9fa458b851aa73417ed6a3398a1
2024-12-03T13:13:03,048 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2eded9fa458b851aa73417ed6a3398a1 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB
2024-12-03T13:13:03,052 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/4719d98980e94fa9957eaf565e4f92c1 is 1080, key is row0215/info:/1733231572930/Put/seqid=0
2024-12-03T13:13:03,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741870_1046 (size=15760)
2024-12-03T13:13:03,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741870_1046 (size=15760)
2024-12-03T13:13:03,059 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/4719d98980e94fa9957eaf565e4f92c1
2024-12-03T13:13:03,065 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/4719d98980e94fa9957eaf565e4f92c1 as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/4719d98980e94fa9957eaf565e4f92c1
2024-12-03T13:13:03,071 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/4719d98980e94fa9957eaf565e4f92c1, entries=10, sequenceid=287, filesize=15.4 K
2024-12-03T13:13:03,073 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=0 B/0 for 2eded9fa458b851aa73417ed6a3398a1 in 25ms, sequenceid=287, compaction requested=false
2024-12-03T13:13:03,073 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2eded9fa458b851aa73417ed6a3398a1:
2024-12-03T13:13:03,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:13:03,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:13:03,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:13:04,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:13:04,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:13:04,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:13:05,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40599 {}] regionserver.HRegion(8581): Flush requested on 2eded9fa458b851aa73417ed6a3398a1
2024-12-03T13:13:05,059 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2eded9fa458b851aa73417ed6a3398a1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-03T13:13:05,064 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/bbb7d276bf6b4c889154c9fe35d00ac3 is 1080, key is row0225/info:/1733231585049/Put/seqid=0
2024-12-03T13:13:05,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741871_1047 (size=12523)
2024-12-03T13:13:05,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741871_1047 (size=12523)
2024-12-03T13:13:05,096 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40599 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=2eded9fa458b851aa73417ed6a3398a1, server=35946657df09,40599,1733231478697
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T13:13:05,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40599 {}] ipc.CallRunner(138): callId: 278 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:56366 deadline: 1733231595096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=2eded9fa458b851aa73417ed6a3398a1, server=35946657df09,40599,1733231478697
2024-12-03T13:13:05,299 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:13:05,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:13:05,478 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/bbb7d276bf6b4c889154c9fe35d00ac3
2024-12-03T13:13:05,486 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/bbb7d276bf6b4c889154c9fe35d00ac3 as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/bbb7d276bf6b4c889154c9fe35d00ac3
2024-12-03T13:13:05,492 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/bbb7d276bf6b4c889154c9fe35d00ac3, entries=7, sequenceid=297, filesize=12.2 K
2024-12-03T13:13:05,494 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 2eded9fa458b851aa73417ed6a3398a1 in 434ms, sequenceid=297, compaction requested=true
2024-12-03T13:13:05,494 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2eded9fa458b851aa73417ed6a3398a1:
2024-12-03T13:13:05,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2eded9fa458b851aa73417ed6a3398a1:info, priority=-2147483648, current under compaction store size is 1
2024-12-03T13:13:05,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-03T13:13:05,494 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-03T13:13:05,496 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 199261 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-03T13:13:05,496 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HStore(1540): 2eded9fa458b851aa73417ed6a3398a1/info is initiating minor compaction (all files)
2024-12-03T13:13:05,496 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2eded9fa458b851aa73417ed6a3398a1/info in TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.
2024-12-03T13:13:05,496 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/e30e158a348b47928834d313666594cf, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/4719d98980e94fa9957eaf565e4f92c1, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/bbb7d276bf6b4c889154c9fe35d00ac3] into tmpdir=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp, totalSize=194.6 K
2024-12-03T13:13:05,496 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.Compactor(224): Compacting e30e158a348b47928834d313666594cf, keycount=153, bloomtype=ROW, size=167.0 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1733231502454
2024-12-03T13:13:05,497 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4719d98980e94fa9957eaf565e4f92c1, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1733231572930
2024-12-03T13:13:05,497 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] compactions.Compactor(224): Compacting bbb7d276bf6b4c889154c9fe35d00ac3, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1733231585049
2024-12-03T13:13:05,517 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2eded9fa458b851aa73417ed6a3398a1#info#compaction#66 average throughput is 43.61 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-03T13:13:05,518 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/2ea9a76da2594fc2bca9b060bde8bcc3 is 1080, key is row0062/info:/1733231502454/Put/seqid=0
2024-12-03T13:13:05,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741872_1048 (size=189427)
2024-12-03T13:13:05,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741872_1048 (size=189427)
2024-12-03T13:13:05,533 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/2ea9a76da2594fc2bca9b060bde8bcc3 as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/2ea9a76da2594fc2bca9b060bde8bcc3
2024-12-03T13:13:05,545 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2eded9fa458b851aa73417ed6a3398a1/info of 2eded9fa458b851aa73417ed6a3398a1 into 2ea9a76da2594fc2bca9b060bde8bcc3(size=185.0 K), total size for store is 185.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-03T13:13:05,546 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2eded9fa458b851aa73417ed6a3398a1:
2024-12-03T13:13:05,546 INFO [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1., storeName=2eded9fa458b851aa73417ed6a3398a1/info, priority=13, startTime=1733231585494; duration=0sec
2024-12-03T13:13:05,546 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-03T13:13:05,546 DEBUG [RS:0;35946657df09:40599-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2eded9fa458b851aa73417ed6a3398a1:info
2024-12-03T13:13:05,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
2024-12-03T13:13:05,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
[From 2024-12-03T13:13:06,299 through 2024-12-03T13:13:14,880 this WARN repeats at roughly half-second intervals per file, with an identical stack trace, for each of the three WALs being closed: hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886, hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409, and hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta. Only the timestamp and target path vary between occurrences.]
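Each WARN above logs "InvocationTargetException: null" because isFileClosed is invoked reflectively (the Method.invoke frame in the trace), and reflection wraps whatever the target method throws in an InvocationTargetException whose own message is null; the real failure is the cause, java.io.IOException: Filesystem closed, raised because the DFS client had already been shut down while the Close-WAL-Writer thread kept retrying lease recovery. The following self-contained demonstration reproduces the wrapping; the class and method are stand-ins, not the HBase code.

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    public final class UnwrapDemo {
        // Stand-in for DFSClient.isFileClosed on a client that is already closed.
        public static boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed");
        }

        public static void main(String[] args) throws Exception {
            Method m = UnwrapDemo.class.getMethod("isFileClosed", String.class);
            try {
                m.invoke(null, "/some/wal");
            } catch (InvocationTargetException e) {
                // Prints "null": the wrapper carries no message of its own,
                // exactly as in the WARN lines above.
                System.out.println("wrapper message: " + e.getMessage());
                // Prints "java.io.IOException: Filesystem closed", the detail
                // the log surfaces in its "Caused by" section.
                System.out.println("real cause: " + e.getCause());
            }
        }
    }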
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:13:15,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40599 {}] regionserver.HRegion(8581): Flush requested on 2eded9fa458b851aa73417ed6a3398a1
2024-12-03T13:13:15,107 INFO  [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2eded9fa458b851aa73417ed6a3398a1 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB
2024-12-03T13:13:15,114 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/b2c22217e17f4803861ed34e9b89e0c0 is 1080, key is row0232/info:/1733231585060/Put/seqid=0
2024-12-03T13:13:15,124 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741873_1049 (size=29807)
2024-12-03T13:13:15,124 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741873_1049 (size=29807)
2024-12-03T13:13:15,125 INFO  [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/b2c22217e17f4803861ed34e9b89e0c0
2024-12-03T13:13:15,136 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/b2c22217e17f4803861ed34e9b89e0c0 as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/b2c22217e17f4803861ed34e9b89e0c0
2024-12-03T13:13:15,141 INFO  [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/b2c22217e17f4803861ed34e9b89e0c0, entries=23, sequenceid=324, filesize=29.1 K
2024-12-03T13:13:15,142 INFO  [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 2eded9fa458b851aa73417ed6a3398a1 in 34ms, sequenceid=324, compaction requested=false
2024-12-03T13:13:15,142 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2eded9fa458b851aa73417ed6a3398a1:
2024-12-03T13:13:15,304 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:13:15,317 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:13:15,575 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 2eded9fa458b851aa73417ed6a3398a1, had cached 0 bytes from a total of 219234
2024-12-03T13:13:15,603 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region ab36b049a4b895c7ad524e3515c6427f, had cached 0 bytes from a total of 70862
2024-12-03T13:13:15,881 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:13:16,304 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:13:16,318 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:13:16,882 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:13:17,113 INFO  [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files
2024-12-03T13:13:17,114 INFO  [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C40599%2C1733231478697.1733231597113
2024-12-03T13:13:17,197 INFO  [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/WALs/35946657df09,40599,1733231478697/35946657df09%2C40599%2C1733231478697.1733231479209 with entries=311, filesize=307.43 KB; new WAL /user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/WALs/35946657df09,40599,1733231478697/35946657df09%2C40599%2C1733231478697.1733231597113
2024-12-03T13:13:17,198 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43497:43497),(127.0.0.1/127.0.0.1:37299:37299)]
2024-12-03T13:13:17,198 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/WALs/35946657df09,40599,1733231478697/35946657df09%2C40599%2C1733231478697.1733231479209 is not closed yet, will try archiving it next time
2024-12-03T13:13:17,212 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741833_1009 (size=314812)
2024-12-03T13:13:17,214 INFO  [Time-limited test {}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.21 KB heapSize=4.13 KB
2024-12-03T13:13:17,215 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741833_1009 (size=314812)
2024-12-03T13:13:17,230 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/hbase/meta/1588230740/.tmp/info/750ae1b90042442080f7b927dbb3541d is 193, key is TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1./info:regioninfo/1733231505602/Put/seqid=0
2024-12-03T13:13:17,248 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741875_1051 (size=7803)
2024-12-03T13:13:17,248 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741875_1051 (size=7803)
2024-12-03T13:13:17,251 INFO  [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.21 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/hbase/meta/1588230740/.tmp/info/750ae1b90042442080f7b927dbb3541d
2024-12-03T13:13:17,261 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/hbase/meta/1588230740/.tmp/info/750ae1b90042442080f7b927dbb3541d as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/hbase/meta/1588230740/info/750ae1b90042442080f7b927dbb3541d
2024-12-03T13:13:17,292 INFO  [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/hbase/meta/1588230740/info/750ae1b90042442080f7b927dbb3541d, entries=16, sequenceid=24, filesize=7.6 K
2024-12-03T13:13:17,294 INFO  [Time-limited test {}] regionserver.HRegion(3040): Finished flush of dataSize ~2.21 KB/2260, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 80ms, sequenceid=24, compaction requested=false
2024-12-03T13:13:17,294 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 1588230740:
2024-12-03T13:13:17,294 INFO  [Time-limited test {}] regionserver.HRegion(2837): Flushing c3c4997abc2047f0af08dcbeaa6cd4b5 1/1 column families, dataSize=78 B heapSize=488 B
2024-12-03T13:13:17,305 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:13:17,318 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:13:17,348 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/hbase/namespace/c3c4997abc2047f0af08dcbeaa6cd4b5/.tmp/info/0e4e36ff47434803bb32cb2f9b7a8705 is 45, key is default/info:d/1733231480162/Put/seqid=0
2024-12-03T13:13:17,375 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741876_1052 (size=5037)
2024-12-03T13:13:17,379 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741876_1052 (size=5037)
2024-12-03T13:13:17,382 INFO  [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/hbase/namespace/c3c4997abc2047f0af08dcbeaa6cd4b5/.tmp/info/0e4e36ff47434803bb32cb2f9b7a8705
2024-12-03T13:13:17,390 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/hbase/namespace/c3c4997abc2047f0af08dcbeaa6cd4b5/.tmp/info/0e4e36ff47434803bb32cb2f9b7a8705 as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/hbase/namespace/c3c4997abc2047f0af08dcbeaa6cd4b5/info/0e4e36ff47434803bb32cb2f9b7a8705
2024-12-03T13:13:17,395 INFO  [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/hbase/namespace/c3c4997abc2047f0af08dcbeaa6cd4b5/info/0e4e36ff47434803bb32cb2f9b7a8705, entries=2, sequenceid=6, filesize=4.9 K
2024-12-03T13:13:17,396 INFO  [Time-limited test {}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for c3c4997abc2047f0af08dcbeaa6cd4b5 in 102ms, sequenceid=6, compaction requested=false
2024-12-03T13:13:17,396 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for c3c4997abc2047f0af08dcbeaa6cd4b5:
2024-12-03T13:13:17,396 INFO  [Time-limited test {}] regionserver.HRegion(2837): Flushing 2eded9fa458b851aa73417ed6a3398a1 1/1 column families, dataSize=2.10 KB heapSize=2.50 KB
2024-12-03T13:13:17,399 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/fc6b4196e6d94b92a8b2a58908594aa1 is 1080, key is row0255/info:/1733231595108/Put/seqid=0
2024-12-03T13:13:17,412 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741877_1053 (size=7116)
2024-12-03T13:13:17,413 INFO  [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.10 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/fc6b4196e6d94b92a8b2a58908594aa1
2024-12-03T13:13:17,415 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741877_1053 (size=7116)
2024-12-03T13:13:17,420 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/.tmp/info/fc6b4196e6d94b92a8b2a58908594aa1 as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/fc6b4196e6d94b92a8b2a58908594aa1
2024-12-03T13:13:17,424 INFO  [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/fc6b4196e6d94b92a8b2a58908594aa1, entries=2, sequenceid=329, filesize=6.9 K
2024-12-03T13:13:17,427 INFO  [Time-limited test {}] regionserver.HRegion(3040): Finished flush of dataSize ~2.10 KB/2152, heapSize ~2.48 KB/2544, currentSize=0 B/0 for 2eded9fa458b851aa73417ed6a3398a1 in 30ms, sequenceid=329, compaction requested=true
2024-12-03T13:13:17,427 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 2eded9fa458b851aa73417ed6a3398a1:
2024-12-03T13:13:17,427 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for ab36b049a4b895c7ad524e3515c6427f:
2024-12-03T13:13:17,427 INFO  [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C40599%2C1733231478697.1733231597427
2024-12-03T13:13:17,450 INFO  [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/WALs/35946657df09,40599,1733231478697/35946657df09%2C40599%2C1733231478697.1733231597113 with entries=4, filesize=1.22 KB; new WAL /user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/WALs/35946657df09,40599,1733231478697/35946657df09%2C40599%2C1733231478697.1733231597427
2024-12-03T13:13:17,450 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37299:37299),(127.0.0.1/127.0.0.1:43497:43497)]
2024-12-03T13:13:17,450 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/WALs/35946657df09,40599,1733231478697/35946657df09%2C40599%2C1733231478697.1733231597113 is not closed yet, will try archiving it next time
2024-12-03T13:13:17,451 INFO  [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/WALs/35946657df09,40599,1733231478697/35946657df09%2C40599%2C1733231478697.1733231479209 to hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/oldWALs/35946657df09%2C40599%2C1733231478697.1733231479209
2024-12-03T13:13:17,451 INFO  [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1])
2024-12-03T13:13:17,452 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741874_1050 (size=1255)
2024-12-03T13:13:17,453 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741874_1050 (size=1255)
2024-12-03T13:13:17,455 INFO  [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/WALs/35946657df09,40599,1733231478697/35946657df09%2C40599%2C1733231478697.1733231597113 to hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/oldWALs/35946657df09%2C40599%2C1733231478697.1733231597113
2024-12-03T13:13:17,551 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster
2024-12-03T13:13:17,551 INFO  [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-03T13:13:17,551 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x63f87d2f to 127.0.0.1:59615
2024-12-03T13:13:17,551 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-03T13:13:17,552 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-12-03T13:13:17,552 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1702798772, stopped=false
2024-12-03T13:13:17,552 INFO  [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=35946657df09,45951,1733231478562
2024-12-03T13:13:17,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40599-0x1019c9b29860001, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-03T13:13:17,562 INFO  [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping
2024-12-03T13:13:17,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-03T13:13:17,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40599-0x1019c9b29860001, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:13:17,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:13:17,562 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-03T13:13:17,562 INFO  [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '35946657df09,40599,1733231478697' *****
2024-12-03T13:13:17,562 INFO  [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested
2024-12-03T13:13:17,563 INFO  [RS:0;35946657df09:40599 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-03T13:13:17,563 INFO  [RS:0;35946657df09:40599 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-03T13:13:17,563 INFO  [RS:0;35946657df09:40599 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-03T13:13:17,563 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-03T13:13:17,563 INFO  [RS:0;35946657df09:40599 {}] regionserver.HRegionServer(3579): Received CLOSE for c3c4997abc2047f0af08dcbeaa6cd4b5
2024-12-03T13:13:17,563 INFO  [RS:0;35946657df09:40599 {}] regionserver.HRegionServer(3579): Received CLOSE for 2eded9fa458b851aa73417ed6a3398a1
2024-12-03T13:13:17,563 INFO  [RS:0;35946657df09:40599 {}] regionserver.HRegionServer(3579): Received CLOSE for ab36b049a4b895c7ad524e3515c6427f
2024-12-03T13:13:17,563 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing c3c4997abc2047f0af08dcbeaa6cd4b5, disabling compactions & flushes
2024-12-03T13:13:17,563 INFO  [RS:0;35946657df09:40599 {}] regionserver.HRegionServer(1224): stopping server 35946657df09,40599,1733231478697
2024-12-03T13:13:17,563 DEBUG [RS:0;35946657df09:40599 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-03T13:13:17,563 INFO  [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733231479742.c3c4997abc2047f0af08dcbeaa6cd4b5.
2024-12-03T13:13:17,563 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733231479742.c3c4997abc2047f0af08dcbeaa6cd4b5.
2024-12-03T13:13:17,563 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733231479742.c3c4997abc2047f0af08dcbeaa6cd4b5. after waiting 0 ms
2024-12-03T13:13:17,563 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733231479742.c3c4997abc2047f0af08dcbeaa6cd4b5.
2024-12-03T13:13:17,563 INFO  [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting
2024-12-03T13:13:17,563 INFO  [RS:0;35946657df09:40599 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-03T13:13:17,563 INFO  [RS:0;35946657df09:40599 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-03T13:13:17,564 INFO  [RS:0;35946657df09:40599 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-03T13:13:17,564 INFO  [RS:0;35946657df09:40599 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740
2024-12-03T13:13:17,565 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40599-0x1019c9b29860001, quorum=127.0.0.1:59615, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-03T13:13:17,570 INFO  [RS:0;35946657df09:40599 {}] regionserver.HRegionServer(1599): Waiting on 4 regions to close
2024-12-03T13:13:17,571 DEBUG [RS:0;35946657df09:40599 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, c3c4997abc2047f0af08dcbeaa6cd4b5=hbase:namespace,,1733231479742.c3c4997abc2047f0af08dcbeaa6cd4b5., 2eded9fa458b851aa73417ed6a3398a1=TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1., ab36b049a4b895c7ad524e3515c6427f=TestLogRolling-testLogRolling,,1733231504930.ab36b049a4b895c7ad524e3515c6427f.}
2024-12-03T13:13:17,571 DEBUG [RS:0;35946657df09:40599 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 2eded9fa458b851aa73417ed6a3398a1, ab36b049a4b895c7ad524e3515c6427f, c3c4997abc2047f0af08dcbeaa6cd4b5
2024-12-03T13:13:17,575 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-03T13:13:17,575 INFO  [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-03T13:13:17,575 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-03T13:13:17,575 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-03T13:13:17,575 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-03T13:13:17,611 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/hbase/namespace/c3c4997abc2047f0af08dcbeaa6cd4b5/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-03T13:13:17,612 INFO  [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733231479742.c3c4997abc2047f0af08dcbeaa6cd4b5.
2024-12-03T13:13:17,612 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for c3c4997abc2047f0af08dcbeaa6cd4b5:
2024-12-03T13:13:17,612 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733231479742.c3c4997abc2047f0af08dcbeaa6cd4b5.
2024-12-03T13:13:17,612 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 2eded9fa458b851aa73417ed6a3398a1, disabling compactions & flushes
2024-12-03T13:13:17,612 INFO  [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.
2024-12-03T13:13:17,612 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.
2024-12-03T13:13:17,612 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1. after waiting 0 ms
2024-12-03T13:13:17,612 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.
2024-12-03T13:13:17,613 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/9d05414febdf448883d835159f30617a.964427a12e751e568278ebaf291689c4->hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/9d05414febdf448883d835159f30617a-top, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/41b578e5bb814188acc76e9b3b0fa908, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/TestLogRolling-testLogRolling=964427a12e751e568278ebaf291689c4-d3cac76e72164ca3a5820b293bb4ca4a, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/4a6300ea38cd4bcba232c1eefd5a9b9b, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/15e4ad4806534d3ebbd033cf7328d103, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/8c08d5bd51094b89a4eefab23d1d0c95, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/f1ad0f9025a24e62b3c5bf6c84c4afca, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/05c4d541cd354f06a62c1da69d2eaaa3, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/6543555a2b854707872764c34a852c0e, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/3d4676a9f0a042c39d9864ae50e0a167, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/ac9023a28a664c4f87af10374914f984, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/b4148a42e90445f18916534dcb286f51, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/5c81c11a340b42bf96d2f218663c44ef, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/fd0f3d9dcedb423ea1e913454f13a95d, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/8b0708a700d048dd89fad25dda17d054, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/5a2b10d0810848d8b787436d8dec9f31, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/e30e158a348b47928834d313666594cf, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/6cfb6a909c5345868aafcd494215fbc7, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/4719d98980e94fa9957eaf565e4f92c1, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/bbb7d276bf6b4c889154c9fe35d00ac3] to archive
2024-12-03T13:13:17,615 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.-1 {}] backup.HFileArchiver(360): Archiving compacted files.
2024-12-03T13:13:17,623 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/hbase/meta/1588230740/recovered.edits/27.seqid, newMaxSeqId=27, maxSeqId=1
2024-12-03T13:13:17,623 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/9d05414febdf448883d835159f30617a.964427a12e751e568278ebaf291689c4 to hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/archive/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/9d05414febdf448883d835159f30617a.964427a12e751e568278ebaf291689c4
2024-12-03T13:13:17,624 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-03T13:13:17,624 INFO  [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-03T13:13:17,624 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-03T13:13:17,624 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-03T13:13:17,625 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/41b578e5bb814188acc76e9b3b0fa908 to hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/archive/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/41b578e5bb814188acc76e9b3b0fa908
2024-12-03T13:13:17,626 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/TestLogRolling-testLogRolling=964427a12e751e568278ebaf291689c4-d3cac76e72164ca3a5820b293bb4ca4a to hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/archive/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/TestLogRolling-testLogRolling=964427a12e751e568278ebaf291689c4-d3cac76e72164ca3a5820b293bb4ca4a
2024-12-03T13:13:17,628 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/4a6300ea38cd4bcba232c1eefd5a9b9b to hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/archive/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/4a6300ea38cd4bcba232c1eefd5a9b9b
2024-12-03T13:13:17,633 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/15e4ad4806534d3ebbd033cf7328d103 to hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/archive/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/15e4ad4806534d3ebbd033cf7328d103
2024-12-03T13:13:17,635 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/8c08d5bd51094b89a4eefab23d1d0c95 to hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/archive/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/8c08d5bd51094b89a4eefab23d1d0c95
2024-12-03T13:13:17,636 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/f1ad0f9025a24e62b3c5bf6c84c4afca to hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/archive/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/f1ad0f9025a24e62b3c5bf6c84c4afca
2024-12-03T13:13:17,638 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/05c4d541cd354f06a62c1da69d2eaaa3 to hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/archive/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/05c4d541cd354f06a62c1da69d2eaaa3
2024-12-03T13:13:17,640 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/6543555a2b854707872764c34a852c0e to hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/archive/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/6543555a2b854707872764c34a852c0e
2024-12-03T13:13:17,642 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/3d4676a9f0a042c39d9864ae50e0a167 to hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/archive/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/3d4676a9f0a042c39d9864ae50e0a167
2024-12-03T13:13:17,643 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/ac9023a28a664c4f87af10374914f984 to hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/archive/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/ac9023a28a664c4f87af10374914f984
2024-12-03T13:13:17,645 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/b4148a42e90445f18916534dcb286f51 to hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/archive/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/b4148a42e90445f18916534dcb286f51
2024-12-03T13:13:17,646 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/5c81c11a340b42bf96d2f218663c44ef to hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/archive/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/5c81c11a340b42bf96d2f218663c44ef
2024-12-03T13:13:17,648 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/fd0f3d9dcedb423ea1e913454f13a95d to hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/archive/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/fd0f3d9dcedb423ea1e913454f13a95d
2024-12-03T13:13:17,649 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/8b0708a700d048dd89fad25dda17d054 to hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/archive/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/8b0708a700d048dd89fad25dda17d054
2024-12-03T13:13:17,651 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/5a2b10d0810848d8b787436d8dec9f31 to hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/archive/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/5a2b10d0810848d8b787436d8dec9f31
2024-12-03T13:13:17,653 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/e30e158a348b47928834d313666594cf to hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/archive/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/e30e158a348b47928834d313666594cf
2024-12-03T13:13:17,655 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/6cfb6a909c5345868aafcd494215fbc7 to hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/archive/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/6cfb6a909c5345868aafcd494215fbc7
2024-12-03T13:13:17,657 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/4719d98980e94fa9957eaf565e4f92c1 to hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/archive/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/4719d98980e94fa9957eaf565e4f92c1
2024-12-03T13:13:17,658 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/bbb7d276bf6b4c889154c9fe35d00ac3 to hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/archive/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/info/bbb7d276bf6b4c889154c9fe35d00ac3
2024-12-03T13:13:17,679 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/2eded9fa458b851aa73417ed6a3398a1/recovered.edits/332.seqid, newMaxSeqId=332, maxSeqId=105
2024-12-03T13:13:17,680 INFO  [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.
2024-12-03T13:13:17,680 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 2eded9fa458b851aa73417ed6a3398a1:
2024-12-03T13:13:17,680 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1733231504930.2eded9fa458b851aa73417ed6a3398a1.
2024-12-03T13:13:17,681 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing ab36b049a4b895c7ad524e3515c6427f, disabling compactions & flushes
2024-12-03T13:13:17,681 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRolling,,1733231504930.ab36b049a4b895c7ad524e3515c6427f.
2024-12-03T13:13:17,681 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRolling,,1733231504930.ab36b049a4b895c7ad524e3515c6427f.
2024-12-03T13:13:17,681 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRolling,,1733231504930.ab36b049a4b895c7ad524e3515c6427f. after waiting 0 ms
2024-12-03T13:13:17,681 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRolling,,1733231504930.ab36b049a4b895c7ad524e3515c6427f.
2024-12-03T13:13:17,687 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733231504930.ab36b049a4b895c7ad524e3515c6427f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/ab36b049a4b895c7ad524e3515c6427f/info/9d05414febdf448883d835159f30617a.964427a12e751e568278ebaf291689c4->hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/964427a12e751e568278ebaf291689c4/info/9d05414febdf448883d835159f30617a-bottom] to archive
2024-12-03T13:13:17,693 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733231504930.ab36b049a4b895c7ad524e3515c6427f.-1 {}] backup.HFileArchiver(360): Archiving compacted files.
2024-12-03T13:13:17,716 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733231504930.ab36b049a4b895c7ad524e3515c6427f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/ab36b049a4b895c7ad524e3515c6427f/info/9d05414febdf448883d835159f30617a.964427a12e751e568278ebaf291689c4 to hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/archive/data/default/TestLogRolling-testLogRolling/ab36b049a4b895c7ad524e3515c6427f/info/9d05414febdf448883d835159f30617a.964427a12e751e568278ebaf291689c4
2024-12-03T13:13:17,751 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/data/default/TestLogRolling-testLogRolling/ab36b049a4b895c7ad524e3515c6427f/recovered.edits/110.seqid, newMaxSeqId=110, maxSeqId=105
2024-12-03T13:13:17,752 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRolling,,1733231504930.ab36b049a4b895c7ad524e3515c6427f.
2024-12-03T13:13:17,752 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for ab36b049a4b895c7ad524e3515c6427f:
2024-12-03T13:13:17,752 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1733231504930.ab36b049a4b895c7ad524e3515c6427f.
2024-12-03T13:13:17,771 INFO [RS:0;35946657df09:40599 {}] regionserver.HRegionServer(1250): stopping server 35946657df09,40599,1733231478697; all regions closed.
2024-12-03T13:13:17,771 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/WALs/35946657df09,40599,1733231478697
2024-12-03T13:13:17,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741834_1010 (size=9351)
2024-12-03T13:13:17,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741834_1010 (size=9351)
2024-12-03T13:13:17,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:13:18,136 INFO [regionserver/35946657df09:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-12-03T13:13:18,136 INFO [regionserver/35946657df09:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-12-03T13:13:18,183 DEBUG [RS:0;35946657df09:40599 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/oldWALs
2024-12-03T13:13:18,183 INFO [RS:0;35946657df09:40599 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 35946657df09%2C40599%2C1733231478697.meta:.meta(num 1733231479626)
2024-12-03T13:13:18,183 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/WALs/35946657df09,40599,1733231478697
2024-12-03T13:13:18,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741878_1054 (size=1071)
2024-12-03T13:13:18,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741878_1054 (size=1071)
2024-12-03T13:13:18,187 DEBUG [RS:0;35946657df09:40599 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/oldWALs
2024-12-03T13:13:18,187 INFO [RS:0;35946657df09:40599 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 35946657df09%2C40599%2C1733231478697:(num 1733231597427)
2024-12-03T13:13:18,187 DEBUG [RS:0;35946657df09:40599 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-03T13:13:18,187 INFO [RS:0;35946657df09:40599 {}] regionserver.LeaseManager(133): Closed leases
2024-12-03T13:13:18,188 INFO [RS:0;35946657df09:40599 {}] hbase.ChoreService(370): Chore service for: regionserver/35946657df09:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-12-03T13:13:18,188 INFO [RS:0;35946657df09:40599 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:40599
2024-12-03T13:13:18,188 INFO [regionserver/35946657df09:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-03T13:13:18,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40599-0x1019c9b29860001, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/35946657df09,40599,1733231478697
2024-12-03T13:13:18,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-03T13:13:18,237 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [35946657df09,40599,1733231478697]
2024-12-03T13:13:18,237 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 35946657df09,40599,1733231478697; numProcessing=1
2024-12-03T13:13:18,245 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/35946657df09,40599,1733231478697 already deleted, retry=false
2024-12-03T13:13:18,245 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 35946657df09,40599,1733231478697 expired; onlineServers=0
2024-12-03T13:13:18,245 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '35946657df09,45951,1733231478562' *****
2024-12-03T13:13:18,245 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-03T13:13:18,246 DEBUG [M:0;35946657df09:45951 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a80af03, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=35946657df09/172.17.0.2:0
2024-12-03T13:13:18,246 INFO [M:0;35946657df09:45951 {}] regionserver.HRegionServer(1224): stopping server 35946657df09,45951,1733231478562
2024-12-03T13:13:18,246 INFO [M:0;35946657df09:45951 {}] regionserver.HRegionServer(1250): stopping server 35946657df09,45951,1733231478562; all regions closed.
2024-12-03T13:13:18,246 DEBUG [M:0;35946657df09:45951 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-03T13:13:18,246 DEBUG [M:0;35946657df09:45951 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-03T13:13:18,246 DEBUG [M:0;35946657df09:45951 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-03T13:13:18,246 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-03T13:13:18,246 DEBUG [master/35946657df09:0:becomeActiveMaster-HFileCleaner.large.0-1733231478968 {}] cleaner.HFileCleaner(306): Exit Thread[master/35946657df09:0:becomeActiveMaster-HFileCleaner.large.0-1733231478968,5,FailOnTimeoutGroup]
2024-12-03T13:13:18,246 INFO [M:0;35946657df09:45951 {}] hbase.ChoreService(370): Chore service for: master/35946657df09:0 had [] on shutdown
2024-12-03T13:13:18,246 DEBUG [M:0;35946657df09:45951 {}] master.HMaster(1733): Stopping service threads
2024-12-03T13:13:18,246 INFO [M:0;35946657df09:45951 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-03T13:13:18,246 DEBUG [master/35946657df09:0:becomeActiveMaster-HFileCleaner.small.0-1733231478968 {}] cleaner.HFileCleaner(306): Exit Thread[master/35946657df09:0:becomeActiveMaster-HFileCleaner.small.0-1733231478968,5,FailOnTimeoutGroup]
2024-12-03T13:13:18,246 INFO [M:0;35946657df09:45951 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-03T13:13:18,246 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-03T13:13:18,254 DEBUG [M:0;35946657df09:45951 {}] zookeeper.ZKUtil(347): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-03T13:13:18,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-03T13:13:18,254 WARN [M:0;35946657df09:45951 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-03T13:13:18,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:13:18,254 INFO [M:0;35946657df09:45951 {}] assignment.AssignmentManager(391): Stopping assignment manager
2024-12-03T13:13:18,254 INFO [M:0;35946657df09:45951 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-03T13:13:18,254 DEBUG [M:0;35946657df09:45951 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-03T13:13:18,254 INFO [M:0;35946657df09:45951 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T13:13:18,254 DEBUG [M:0;35946657df09:45951 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T13:13:18,254 DEBUG [M:0;35946657df09:45951 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-03T13:13:18,254 DEBUG [M:0;35946657df09:45951 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T13:13:18,254 INFO [M:0;35946657df09:45951 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=66.44 KB heapSize=81.69 KB
2024-12-03T13:13:18,255 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-03T13:13:18,285 DEBUG [M:0;35946657df09:45951 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/01b0d9cb867e41f88abc52d862a37ab1 is 82, key is hbase:meta,,1/info:regioninfo/1733231479659/Put/seqid=0
2024-12-03T13:13:18,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741879_1055 (size=5672)
2024-12-03T13:13:18,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741879_1055 (size=5672)
2024-12-03T13:13:18,299 INFO [M:0;35946657df09:45951 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/01b0d9cb867e41f88abc52d862a37ab1
2024-12-03T13:13:18,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:13:18,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:13:18,320 DEBUG [M:0;35946657df09:45951 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/32e0cd3ae50a4dffad99a1ca3e53e7f8 is 750, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1733231480718/Put/seqid=0
2024-12-03T13:13:18,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40599-0x1019c9b29860001, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T13:13:18,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40599-0x1019c9b29860001, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T13:13:18,339 INFO [RS:0;35946657df09:40599 {}] regionserver.HRegionServer(1307): Exiting; stopping=35946657df09,40599,1733231478697; zookeeper connection closed.
2024-12-03T13:13:18,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741880_1056 (size=7285)
2024-12-03T13:13:18,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741880_1056 (size=7285)
2024-12-03T13:13:18,341 INFO [M:0;35946657df09:45951 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65.83 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/32e0cd3ae50a4dffad99a1ca3e53e7f8
2024-12-03T13:13:18,346 INFO [M:0;35946657df09:45951 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 32e0cd3ae50a4dffad99a1ca3e53e7f8
2024-12-03T13:13:18,346 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7bae4c02 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7bae4c02
2024-12-03T13:13:18,347 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-03T13:13:18,366 DEBUG [M:0;35946657df09:45951 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2c58ddbaab3a465c9754f8a7b6f9e5d3 is 69, key is 35946657df09,40599,1733231478697/rs:state/1733231479046/Put/seqid=0
2024-12-03T13:13:18,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741881_1057 (size=5156)
2024-12-03T13:13:18,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741881_1057 (size=5156)
2024-12-03T13:13:18,385 INFO [M:0;35946657df09:45951 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2c58ddbaab3a465c9754f8a7b6f9e5d3
2024-12-03T13:13:18,408 DEBUG [M:0;35946657df09:45951 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/562fd1730f6742bbbf2f5b50c35905d0 is 52, key is load_balancer_on/state:d/1733231480334/Put/seqid=0
2024-12-03T13:13:18,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741882_1058 (size=5056)
2024-12-03T13:13:18,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741882_1058 (size=5056)
2024-12-03T13:13:18,421 INFO [M:0;35946657df09:45951 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/562fd1730f6742bbbf2f5b50c35905d0
2024-12-03T13:13:18,426 DEBUG [M:0;35946657df09:45951 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/01b0d9cb867e41f88abc52d862a37ab1 as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/01b0d9cb867e41f88abc52d862a37ab1
2024-12-03T13:13:18,435 INFO [M:0;35946657df09:45951 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/01b0d9cb867e41f88abc52d862a37ab1, entries=8, sequenceid=164, filesize=5.5 K
2024-12-03T13:13:18,436 DEBUG [M:0;35946657df09:45951 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/32e0cd3ae50a4dffad99a1ca3e53e7f8 as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/32e0cd3ae50a4dffad99a1ca3e53e7f8
2024-12-03T13:13:18,441 INFO [M:0;35946657df09:45951 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 32e0cd3ae50a4dffad99a1ca3e53e7f8
2024-12-03T13:13:18,441 INFO [M:0;35946657df09:45951 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/32e0cd3ae50a4dffad99a1ca3e53e7f8, entries=18, sequenceid=164, filesize=7.1 K
2024-12-03T13:13:18,442 DEBUG [M:0;35946657df09:45951 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2c58ddbaab3a465c9754f8a7b6f9e5d3 as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2c58ddbaab3a465c9754f8a7b6f9e5d3
2024-12-03T13:13:18,446 INFO [M:0;35946657df09:45951 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2c58ddbaab3a465c9754f8a7b6f9e5d3, entries=1, sequenceid=164, filesize=5.0 K
2024-12-03T13:13:18,447 DEBUG [M:0;35946657df09:45951 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/562fd1730f6742bbbf2f5b50c35905d0 as hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/562fd1730f6742bbbf2f5b50c35905d0
2024-12-03T13:13:18,451 INFO [M:0;35946657df09:45951 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34583/user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/562fd1730f6742bbbf2f5b50c35905d0, entries=1, sequenceid=164, filesize=4.9 K
2024-12-03T13:13:18,459 INFO [M:0;35946657df09:45951 {}] regionserver.HRegion(3040): Finished flush of dataSize ~66.44 KB/68031, heapSize ~81.63 KB/83584, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 204ms, sequenceid=164, compaction requested=false
2024-12-03T13:13:18,460 INFO [M:0;35946657df09:45951 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T13:13:18,460 DEBUG [M:0;35946657df09:45951 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-03T13:13:18,460 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/fe04dc36-bcb0-7ed7-0b22-1a87d6cc2da3/MasterData/WALs/35946657df09,45951,1733231478562
2024-12-03T13:13:18,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34307 is added to blk_1073741830_1006 (size=79260)
2024-12-03T13:13:18,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40969 is added to blk_1073741830_1006 (size=79260)
2024-12-03T13:13:18,464 INFO [M:0;35946657df09:45951 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-12-03T13:13:18,464 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-03T13:13:18,464 INFO [M:0;35946657df09:45951 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:45951
2024-12-03T13:13:18,470 DEBUG [M:0;35946657df09:45951 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/35946657df09,45951,1733231478562 already deleted, retry=false
2024-12-03T13:13:18,544 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-03T13:13:18,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T13:13:18,579 INFO [M:0;35946657df09:45951 {}] regionserver.HRegionServer(1307): Exiting; stopping=35946657df09,45951,1733231478562; zookeeper connection closed.
2024-12-03T13:13:18,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45951-0x1019c9b29860000, quorum=127.0.0.1:59615, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T13:13:18,588 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@290defc9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T13:13:18,588 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3054128b{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-03T13:13:18,588 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-03T13:13:18,588 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@76d55a99{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-03T13:13:18,589 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2feb9278{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/hadoop.log.dir/,STOPPED}
2024-12-03T13:13:18,596 WARN [BP-1105107160-172.17.0.2-1733231476851 heartbeating to localhost/127.0.0.1:34583 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-03T13:13:18,596 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-03T13:13:18,596 WARN [BP-1105107160-172.17.0.2-1733231476851 heartbeating to localhost/127.0.0.1:34583 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1105107160-172.17.0.2-1733231476851 (Datanode Uuid 587006a0-d2ce-462f-a505-7ffd679c4f3e) service to localhost/127.0.0.1:34583
2024-12-03T13:13:18,596 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-03T13:13:18,596 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/cluster_544ddc48-3306-e34b-de84-543349964af8/dfs/data/data3/current/BP-1105107160-172.17.0.2-1733231476851 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T13:13:18,596 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-03T13:13:18,598 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/cluster_544ddc48-3306-e34b-de84-543349964af8/dfs/data/data4/current/BP-1105107160-172.17.0.2-1733231476851 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T13:13:18,615 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3ec3473f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T13:13:18,619 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2db9b638{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-03T13:13:18,619 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-03T13:13:18,619 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5b8fcc9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-03T13:13:18,619 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49e87701{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/hadoop.log.dir/,STOPPED}
2024-12-03T13:13:18,629 WARN [BP-1105107160-172.17.0.2-1733231476851 heartbeating to localhost/127.0.0.1:34583 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-03T13:13:18,629 WARN [BP-1105107160-172.17.0.2-1733231476851 heartbeating to localhost/127.0.0.1:34583 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1105107160-172.17.0.2-1733231476851 (Datanode Uuid 783e0338-1325-4a89-9548-84ac34f93c76) service to localhost/127.0.0.1:34583
2024-12-03T13:13:18,630 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-03T13:13:18,630 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T13:13:18,630 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/cluster_544ddc48-3306-e34b-de84-543349964af8/dfs/data/data2/current/BP-1105107160-172.17.0.2-1733231476851 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T13:13:18,630 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/cluster_544ddc48-3306-e34b-de84-543349964af8/dfs/data/data1/current/BP-1105107160-172.17.0.2-1733231476851 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T13:13:18,631 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T13:13:18,663 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@79537413{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T13:13:18,663 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@53b67655{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T13:13:18,663 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T13:13:18,664 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1bb5a098{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T13:13:18,664 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3972b007{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/hadoop.log.dir/,STOPPED} 2024-12-03T13:13:18,685 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-03T13:13:18,727 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-03T13:13:18,739 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=122 (was 110) - Thread LEAK? -, OpenFileDescriptor=484 (was 466) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=619 (was 1036), ProcessCount=11 (was 11), AvailableMemoryMB=2013 (was 659) - AvailableMemoryMB LEAK? 
- 2024-12-03T13:13:18,755 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=123, OpenFileDescriptor=484, MaxFileDescriptor=1048576, SystemLoadAverage=619, ProcessCount=11, AvailableMemoryMB=2013 2024-12-03T13:13:18,756 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-03T13:13:18,756 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/hadoop.log.dir so I do NOT create it in target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea 2024-12-03T13:13:18,756 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/271ac27e-fbb1-1dae-b5de-69cab9889265/hadoop.tmp.dir so I do NOT create it in target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea 2024-12-03T13:13:18,756 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/cluster_e0f154dc-1333-2f7b-b4f0-93d72c44468c, deleteOnExit=true 2024-12-03T13:13:18,756 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-03T13:13:18,756 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/test.cache.data in system properties and HBase conf 2024-12-03T13:13:18,756 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/hadoop.tmp.dir in system properties and HBase conf 2024-12-03T13:13:18,756 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/hadoop.log.dir in system properties and HBase conf 2024-12-03T13:13:18,756 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-03T13:13:18,757 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-03T13:13:18,757 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-03T13:13:18,757 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-03T13:13:18,757 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-03T13:13:18,757 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-03T13:13:18,757 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-03T13:13:18,757 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T13:13:18,757 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-03T13:13:18,757 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-03T13:13:18,757 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T13:13:18,757 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T13:13:18,758 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-03T13:13:18,758 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/nfs.dump.dir in system properties and HBase conf 2024-12-03T13:13:18,758 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/java.io.tmpdir in system properties and HBase conf 2024-12-03T13:13:18,758 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T13:13:18,758 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-03T13:13:18,758 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-03T13:13:18,776 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T13:13:18,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-03T13:13:19,037 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-03T13:13:19,043 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-03T13:13:19,044 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-03T13:13:19,044 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-03T13:13:19,044 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-03T13:13:19,044 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-03T13:13:19,045 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@249f3f68{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/hadoop.log.dir/,AVAILABLE}
2024-12-03T13:13:19,045 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@63976b67{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-03T13:13:19,074 INFO [regionserver/35946657df09:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-03T13:13:19,161 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3e575f71{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/java.io.tmpdir/jetty-localhost-38675-hadoop-hdfs-3_4_1-tests_jar-_-any-11248039239431445710/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-03T13:13:19,171 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6f55d86f{HTTP/1.1, (http/1.1)}{localhost:38675}
2024-12-03T13:13:19,171 INFO [Time-limited test {}] server.Server(415): Started @444936ms
2024-12-03T13:13:19,188 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-03T13:13:19,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:13:19,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
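The repeated WARN above comes from RecoverLeaseFSUtils probing DistributedFileSystem#isFileClosed through reflection; once the test has torn down its DFSClient, the underlying IOException("Filesystem closed") surfaces wrapped in an InvocationTargetException, which is why every trace reads "InvocationTargetException: null ... Caused by: ...". A minimal sketch of that reflective-probe pattern (the class and method names below are illustrative, not HBase's exact internals):

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative sketch only: a reflective isFileClosed probe, and how the
    // real failure ends up as the InvocationTargetException's cause.
    final class IsFileClosedProbe {
      // 'method' is assumed to be DistributedFileSystem#isFileClosed(Path),
      // looked up reflectively because older Hadoop releases lack it.
      static boolean probe(FileSystem fs, Method method, Path wal) {
        try {
          return (Boolean) method.invoke(fs, wal);
        } catch (InvocationTargetException e) {
          // The target threw; the real failure is the cause, e.g. the
          // "Filesystem closed" IOException seen in the traces above.
          if (e.getCause() instanceof IOException) {
            return false; // treat as "not closed yet"; the caller may retry
          }
          throw new RuntimeException(e.getCause());
        } catch (IllegalAccessException e) {
          throw new RuntimeException(e);
        }
      }
    }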
2024-12-03T13:13:19,375 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-03T13:13:19,379 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-03T13:13:19,380 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-03T13:13:19,380 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-03T13:13:19,380 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-03T13:13:19,381 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f856bbb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/hadoop.log.dir/,AVAILABLE}
2024-12-03T13:13:19,381 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@37b4b146{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-03T13:13:19,491 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@25b7e8a0{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/java.io.tmpdir/jetty-localhost-40515-hadoop-hdfs-3_4_1-tests_jar-_-any-5516708161038326636/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T13:13:19,491 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@21af4670{HTTP/1.1, (http/1.1)}{localhost:40515}
2024-12-03T13:13:19,491 INFO [Time-limited test {}] server.Server(415): Started @445256ms
2024-12-03T13:13:19,493 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-03T13:13:19,630 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-03T13:13:19,634 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-03T13:13:19,655 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-03T13:13:19,655 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-03T13:13:19,655 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-03T13:13:19,657 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@38c3eed5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/hadoop.log.dir/,AVAILABLE}
2024-12-03T13:13:19,657 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ad706ec{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-03T13:13:19,834 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@31b05750{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/java.io.tmpdir/jetty-localhost-34221-hadoop-hdfs-3_4_1-tests_jar-_-any-3685363459224116657/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T13:13:19,835 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@78f9a22a{HTTP/1.1, (http/1.1)}{localhost:34221}
2024-12-03T13:13:19,835 INFO [Time-limited test {}] server.Server(415): Started @445600ms
2024-12-03T13:13:19,837 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-03T13:13:19,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
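The recurring "Unable to initialize FileSignerSecretProvider" WARN is harmless in this test context: the HTTP auth filter falls back to a random signing secret whenever the configured secret file is unreadable. A sketch of how a test setup might point the filter at a readable secret instead (the property name is Hadoop's documented hadoop.http.authentication.signature.secret.file; the path here is illustrative):

    import org.apache.hadoop.conf.Configuration;

    public class HttpAuthSecretConfig {
      // Sketch: supply a readable secret file so the fallback-to-random-secrets
      // WARN does not fire. The path is an assumption, not from this run.
      public static Configuration withSecretFile() {
        Configuration conf = new Configuration();
        conf.set("hadoop.http.authentication.signature.secret.file",
            "/tmp/http-auth-signature-secret");
        return conf;
      }
    }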
2024-12-03T13:13:20,062 WARN [Thread-2246 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/cluster_e0f154dc-1333-2f7b-b4f0-93d72c44468c/dfs/data/data1/current/BP-2079988190-172.17.0.2-1733231598815/current, will proceed with Du for space computation calculation,
2024-12-03T13:13:20,067 WARN [Thread-2247 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/cluster_e0f154dc-1333-2f7b-b4f0-93d72c44468c/dfs/data/data2/current/BP-2079988190-172.17.0.2-1733231598815/current, will proceed with Du for space computation calculation,
2024-12-03T13:13:20,163 WARN [Thread-2212 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-03T13:13:20,181 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x39002234504632e4 with lease ID 0x3ec405909e235818: Processing first storage report for DS-742620ef-e18f-490a-bd3c-f8268a920a54 from datanode DatanodeRegistration(127.0.0.1:34715, datanodeUuid=0abeb825-835c-41f0-b088-518b1034df39, infoPort=46561, infoSecurePort=0, ipcPort=36245, storageInfo=lv=-57;cid=testClusterID;nsid=125247049;c=1733231598815)
2024-12-03T13:13:20,181 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x39002234504632e4 with lease ID 0x3ec405909e235818: from storage DS-742620ef-e18f-490a-bd3c-f8268a920a54 node DatanodeRegistration(127.0.0.1:34715, datanodeUuid=0abeb825-835c-41f0-b088-518b1034df39, infoPort=46561, infoSecurePort=0, ipcPort=36245, storageInfo=lv=-57;cid=testClusterID;nsid=125247049;c=1733231598815), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-03T13:13:20,186 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x39002234504632e4 with lease ID 0x3ec405909e235818: Processing first storage report for DS-e58ec938-05bf-45c4-9a9c-6f6126d850e6 from datanode DatanodeRegistration(127.0.0.1:34715, datanodeUuid=0abeb825-835c-41f0-b088-518b1034df39, infoPort=46561, infoSecurePort=0, ipcPort=36245, storageInfo=lv=-57;cid=testClusterID;nsid=125247049;c=1733231598815)
2024-12-03T13:13:20,186 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x39002234504632e4 with lease ID 0x3ec405909e235818: from storage DS-e58ec938-05bf-45c4-9a9c-6f6126d850e6 node DatanodeRegistration(127.0.0.1:34715, datanodeUuid=0abeb825-835c-41f0-b088-518b1034df39, infoPort=46561, infoSecurePort=0, ipcPort=36245, storageInfo=lv=-57;cid=testClusterID;nsid=125247049;c=1733231598815), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-03T13:13:20,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:13:20,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:13:20,492 WARN [Thread-2259 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/cluster_e0f154dc-1333-2f7b-b4f0-93d72c44468c/dfs/data/data3/current/BP-2079988190-172.17.0.2-1733231598815/current, will proceed with Du for space computation calculation,
2024-12-03T13:13:20,495 WARN [Thread-2260 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/cluster_e0f154dc-1333-2f7b-b4f0-93d72c44468c/dfs/data/data4/current/BP-2079988190-172.17.0.2-1733231598815/current, will proceed with Du for space computation calculation,
2024-12-03T13:13:20,539 WARN [Thread-2237 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-03T13:13:20,544 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf2a8bf12b832a8a3 with lease ID 0x3ec405909e235819: Processing first storage report for DS-4d3aed87-e75a-4833-9b26-3191f2f5d765 from datanode DatanodeRegistration(127.0.0.1:45117, datanodeUuid=a4b78c45-4791-4db5-b009-f6e732f86394, infoPort=41647, infoSecurePort=0, ipcPort=36737, storageInfo=lv=-57;cid=testClusterID;nsid=125247049;c=1733231598815)
2024-12-03T13:13:20,544 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf2a8bf12b832a8a3 with lease ID 0x3ec405909e235819: from storage DS-4d3aed87-e75a-4833-9b26-3191f2f5d765 node DatanodeRegistration(127.0.0.1:45117, datanodeUuid=a4b78c45-4791-4db5-b009-f6e732f86394, infoPort=41647, infoSecurePort=0, ipcPort=36737, storageInfo=lv=-57;cid=testClusterID;nsid=125247049;c=1733231598815), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-03T13:13:20,546 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf2a8bf12b832a8a3 with lease ID 0x3ec405909e235819: Processing first storage report for DS-3994a3ea-e3a2-4814-9e70-f7da54dd92e9 from datanode DatanodeRegistration(127.0.0.1:45117, datanodeUuid=a4b78c45-4791-4db5-b009-f6e732f86394, infoPort=41647, infoSecurePort=0, ipcPort=36737, storageInfo=lv=-57;cid=testClusterID;nsid=125247049;c=1733231598815)
2024-12-03T13:13:20,546 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf2a8bf12b832a8a3 with lease ID 0x3ec405909e235819: from storage DS-3994a3ea-e3a2-4814-9e70-f7da54dd92e9 node DatanodeRegistration(127.0.0.1:45117, datanodeUuid=a4b78c45-4791-4db5-b009-f6e732f86394, infoPort=41647, infoSecurePort=0, ipcPort=36737, storageInfo=lv=-57;cid=testClusterID;nsid=125247049;c=1733231598815), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-03T13:13:20,645 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea
2024-12-03T13:13:20,684 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/cluster_e0f154dc-1333-2f7b-b4f0-93d72c44468c/zookeeper_0, clientPort=54151, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/cluster_e0f154dc-1333-2f7b-b4f0-93d72c44468c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/cluster_e0f154dc-1333-2f7b-b4f0-93d72c44468c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-03T13:13:20,699 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=54151
2024-12-03T13:13:20,699 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:13:20,701 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:13:20,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34715 is added to blk_1073741825_1001 (size=7)
2024-12-03T13:13:20,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741825_1001 (size=7)
2024-12-03T13:13:20,822 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1 with version=8
2024-12-03T13:13:20,823 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:39447/user/jenkins/test-data/09b84984-a2a8-c344-3dd7-1d55d42b8138/hbase-staging
2024-12-03T13:13:20,825 INFO [Time-limited test {}] client.ConnectionUtils(129): master/35946657df09:0 server-side Connection retries=45
2024-12-03T13:13:20,825 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-03T13:13:20,825 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-03T13:13:20,826 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-03T13:13:20,826 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-03T13:13:20,826 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-03T13:13:20,826 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-03T13:13:20,826 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-03T13:13:20,835 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:41129
2024-12-03T13:13:20,836 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:13:20,841 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:13:20,845 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:41129 connecting to ZooKeeper ensemble=127.0.0.1:54151
2024-12-03T13:13:20,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
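For orientation, the startup recorded above (MiniZooKeeperCluster on clientPort=54151, the version file under hdfs://localhost:41031, then a master RPC server bound to port 41129) is what HBaseTestingUtility assembles for a test. A minimal sketch of that harness usage (assuming the branch-2 API; error handling elided):

    import org.apache.hadoop.hbase.HBaseTestingUtility;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        // Spins up MiniDFSCluster, MiniZooKeeperCluster, one HMaster and one
        // region server -- the same components whose startup the log records.
        util.startMiniCluster();
        try {
          // a test body would run against util.getConnection() here
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }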
11 more
2024-12-03T13:13:20,943 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:411290x0, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-03T13:13:20,955 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41129-0x1019c9d06900000 connected
2024-12-03T13:13:21,085 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-03T13:13:21,103 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-03T13:13:21,117 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-03T13:13:21,138 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41129
2024-12-03T13:13:21,142 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41129
2024-12-03T13:13:21,150 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41129
2024-12-03T13:13:21,161 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41129
2024-12-03T13:13:21,167 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41129
2024-12-03T13:13:21,171 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1, hbase.cluster.distributed=false
2024-12-03T13:13:21,208 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/35946657df09:0 server-side Connection retries=45
2024-12-03T13:13:21,208 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-03T13:13:21,211 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-03T13:13:21,211 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-03T13:13:21,211 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-03T13:13:21,211 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-03T13:13:21,211 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-03T13:13:21,211 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-03T13:13:21,214 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:45683
2024-12-03T13:13:21,214 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-03T13:13:21,218 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-03T13:13:21,220 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:13:21,222 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:13:21,227 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:45683 connecting to ZooKeeper ensemble=127.0.0.1:54151
2024-12-03T13:13:21,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:13:21,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
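The RpcExecutor lines above describe a bounded LinkedBlockingQueue per call queue, drained by a fixed number of handler threads (for example handlerCount=3, maxQueueLength=30). A generic sketch of that queue-plus-handlers shape (illustrative only, not HBase's RpcExecutor):

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    public class HandlerPoolSketch {
      public static void main(String[] args) {
        // maxQueueLength=30 as in the log; calls beyond the bound are rejected
        BlockingQueue<Runnable> callQueue = new LinkedBlockingQueue<>(30);
        int handlerCount = 3; // handlerCount=3 as in the log
        for (int i = 0; i < handlerCount; i++) {
          Thread handler = new Thread(() -> {
            try {
              while (true) {
                callQueue.take().run(); // block until an RPC call is queued
              }
            } catch (InterruptedException ie) {
              Thread.currentThread().interrupt();
            }
          }, "handler=" + i);
          handler.setDaemon(true);
          handler.start();
        }
        callQueue.offer(() -> System.out.println("call handled"));
      }
    }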
2024-12-03T13:13:21,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:456830x0, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-03T13:13:21,473 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:456830x0, quorum=127.0.0.1:54151, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-03T13:13:21,473 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45683-0x1019c9d06900001 connected
2024-12-03T13:13:21,473 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45683-0x1019c9d06900001, quorum=127.0.0.1:54151, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-03T13:13:21,474 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45683-0x1019c9d06900001, quorum=127.0.0.1:54151, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-03T13:13:21,474 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45683
2024-12-03T13:13:21,474 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45683
2024-12-03T13:13:21,474 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45683
2024-12-03T13:13:21,483 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45683
2024-12-03T13:13:21,486 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45683
2024-12-03T13:13:21,504 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/35946657df09,41129,1733231600825
2024-12-03T13:13:21,521 DEBUG [M:0;35946657df09:41129 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;35946657df09:41129
2024-12-03T13:13:21,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T13:13:21,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45683-0x1019c9d06900001, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T13:13:21,635 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/35946657df09,41129,1733231600825
2024-12-03T13:13:21,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-03T13:13:21,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:13:21,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45683-0x1019c9d06900001, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-03T13:13:21,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45683-0x1019c9d06900001, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:13:21,648 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-03T13:13:21,648 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-03T13:13:21,648 INFO [master/35946657df09:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/35946657df09,41129,1733231600825 from backup master directory
2024-12-03T13:13:21,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/35946657df09,41129,1733231600825
2024-12-03T13:13:21,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45683-0x1019c9d06900001, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T13:13:21,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T13:13:21,655 WARN [master/35946657df09:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-03T13:13:21,655 INFO [master/35946657df09:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=35946657df09,41129,1733231600825
2024-12-03T13:13:21,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34715 is added to blk_1073741826_1002 (size=42)
2024-12-03T13:13:21,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741826_1002 (size=42)
2024-12-03T13:13:21,747 DEBUG [master/35946657df09:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/hbase.id with ID: 3d8bdb5f-c2ed-4064-aefe-c1b9e63a0997
2024-12-03T13:13:21,766 INFO [master/35946657df09:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T13:13:21,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:13:21,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45683-0x1019c9d06900001, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:13:21,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34715 is added to blk_1073741827_1003 (size=196)
2024-12-03T13:13:21,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741827_1003 (size=196)
2024-12-03T13:13:21,850 INFO [master/35946657df09:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
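"Set watcher on znode that does not yet exist" in the lines above corresponds to ZooKeeper's exists() call, which registers a watch even when the node is absent, which is why the later NodeCreated event for /hbase/master still fires. A minimal sketch (the ensemble address is taken from the log; session handling is elided):

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class MasterWatchSketch {
      public static void main(String[] args) throws Exception {
        Watcher watcher = (WatchedEvent event) ->
            System.out.println("event " + event.getType() + " on " + event.getPath());
        ZooKeeper zk = new ZooKeeper("127.0.0.1:54151", 30000, watcher);
        // exists() registers the watch even if /hbase/master is absent, so a
        // later NodeCreated event (as in the log) is still delivered.
        zk.exists("/hbase/master", watcher);
      }
    }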
2024-12-03T13:13:21,851 INFO [master/35946657df09:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-03T13:13:21,855 INFO [master/35946657df09:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-03T13:13:21,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:13:21,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741828_1004 (size=1189)
2024-12-03T13:13:21,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34715 is added to blk_1073741828_1004 (size=1189)
2024-12-03T13:13:21,912 INFO [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/MasterData/data/master/store
2024-12-03T13:13:21,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741829_1005 (size=34)
2024-12-03T13:13:21,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34715 is added to blk_1073741829_1005 (size=34)
2024-12-03T13:13:22,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:13:22,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:13:22,345 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T13:13:22,345 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-03T13:13:22,345 INFO [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T13:13:22,345 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T13:13:22,345 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-03T13:13:22,345 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T13:13:22,345 INFO [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T13:13:22,345 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-03T13:13:22,346 WARN [master/35946657df09:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/MasterData/data/master/store/.initializing
2024-12-03T13:13:22,346 DEBUG [master/35946657df09:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/MasterData/WALs/35946657df09,41129,1733231600825
2024-12-03T13:13:22,350 INFO [master/35946657df09:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=35946657df09%2C41129%2C1733231600825, suffix=, logDir=hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/MasterData/WALs/35946657df09,41129,1733231600825, archiveDir=hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/MasterData/oldWALs, maxLogs=10
2024-12-03T13:13:22,350 INFO [master/35946657df09:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C41129%2C1733231600825.1733231602350
2024-12-03T13:13:22,373 INFO [master/35946657df09:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/MasterData/WALs/35946657df09,41129,1733231600825/35946657df09%2C41129%2C1733231600825.1733231602350
2024-12-03T13:13:22,373 DEBUG [master/35946657df09:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41647:41647),(127.0.0.1/127.0.0.1:46561:46561)]
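In the WAL configuration line above, rollsize=128 MB is exactly half of blocksize=256 MB, which is consistent with a log-roll multiplier of 0.5 (that this build uses 0.5 for hbase.regionserver.logroll.multiplier is an assumption; verify against the running config). The arithmetic, as a sketch:

    public class WalRollSizeSketch {
      public static void main(String[] args) {
        long blockSize = 256L * 1024 * 1024;   // blocksize=256 MB from the log
        double multiplier = 0.5;               // assumed log-roll multiplier
        long rollSize = (long) (blockSize * multiplier);
        System.out.println(rollSize == 128L * 1024 * 1024); // true: rollsize=128 MB
      }
    }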
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T13:13:22,383 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T13:13:22,383 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T13:13:22,386 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-03T13:13:22,387 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T13:13:22,391 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T13:13:22,391 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T13:13:22,396 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-03T13:13:22,396 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T13:13:22,396 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T13:13:22,397 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T13:13:22,397 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T13:13:22,403 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T13:13:22,419 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T13:13:22,435 DEBUG [master/35946657df09:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T13:13:22,436 INFO [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=811351, jitterRate=0.03168618679046631}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T13:13:22,436 DEBUG [master/35946657df09:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-03T13:13:22,436 INFO [master/35946657df09:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-03T13:13:22,448 DEBUG [master/35946657df09:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2adb02e6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T13:13:22,449 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-03T13:13:22,449 INFO [master/35946657df09:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-03T13:13:22,449 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-03T13:13:22,449 INFO [master/35946657df09:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 
2024-12-03T13:13:22,450 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec
2024-12-03T13:13:22,450 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec
2024-12-03T13:13:22,450 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-12-03T13:13:22,466 INFO [master/35946657df09:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
2024-12-03T13:13:22,467 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-12-03T13:13:22,519 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false
2024-12-03T13:13:22,519 INFO [master/35946657df09:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-12-03T13:13:22,521 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-12-03T13:13:22,531 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false
2024-12-03T13:13:22,531 INFO [master/35946657df09:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-12-03T13:13:22,547 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-12-03T13:13:22,561 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false
2024-12-03T13:13:22,565 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-12-03T13:13:22,571 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false
2024-12-03T13:13:22,587 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-12-03T13:13:22,596 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-12-03T13:13:22,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45683-0x1019c9d06900001, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-03T13:13:22,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-03T13:13:22,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:13:22,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45683-0x1019c9d06900001, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:13:22,606 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=35946657df09,41129,1733231600825, sessionid=0x1019c9d06900000, setting cluster-up flag (Was=false)
2024-12-03T13:13:22,612 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:22,612 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:22,613 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:22,613 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:22,613 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:22,613 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:22,614 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:22,614 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:22,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45683-0x1019c9d06900001, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:13:22,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:13:22,644 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:22,645 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:22,645 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:22,645 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:22,645 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:22,648 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-12-03T13:13:22,649 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=35946657df09,41129,1733231600825
2024-12-03T13:13:22,651 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:22,651 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:22,651 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:22,652 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:22,668 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45683-0x1019c9d06900001, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:13:22,668 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:13:22,696 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-12-03T13:13:22,697 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=35946657df09,41129,1733231600825
2024-12-03T13:13:22,711 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta
2024-12-03T13:13:22,711 INFO [master/35946657df09:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2
2024-12-03T13:13:22,712 INFO [master/35946657df09:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-12-03T13:13:22,712 DEBUG [master/35946657df09:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 35946657df09,41129,1733231600825 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
2024-12-03T13:13:22,712 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/35946657df09:0, corePoolSize=5, maxPoolSize=5
2024-12-03T13:13:22,712 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/35946657df09:0, corePoolSize=5, maxPoolSize=5
2024-12-03T13:13:22,712 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/35946657df09:0, corePoolSize=5, maxPoolSize=5
2024-12-03T13:13:22,712 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/35946657df09:0, corePoolSize=5, maxPoolSize=5
2024-12-03T13:13:22,712 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/35946657df09:0, corePoolSize=10, maxPoolSize=10
2024-12-03T13:13:22,712 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:13:22,712 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/35946657df09:0, corePoolSize=2, maxPoolSize=2
2024-12-03T13:13:22,713 DEBUG [master/35946657df09:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:13:22,732 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta
2024-12-03T13:13:22,732 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region
2024-12-03T13:13:22,734 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:13:22,734 INFO [PEWorker-2 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-03T13:13:22,738 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733231632738
2024-12-03T13:13:22,739 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-12-03T13:13:22,742 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-12-03T13:13:22,743 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-12-03T13:13:22,743 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-12-03T13:13:22,743 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-12-03T13:13:22,743 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-12-03T13:13:22,746 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-03T13:13:22,751 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-12-03T13:13:22,751 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-12-03T13:13:22,751 DEBUG [RS:0;35946657df09:45683 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;35946657df09:45683
2024-12-03T13:13:22,751 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-12-03T13:13:22,760 INFO [RS:0;35946657df09:45683 {}] regionserver.HRegionServer(1008): ClusterId : 3d8bdb5f-c2ed-4064-aefe-c1b9e63a0997
2024-12-03T13:13:22,760 DEBUG [RS:0;35946657df09:45683 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-12-03T13:13:22,762 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-12-03T13:13:22,763 INFO [master/35946657df09:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-12-03T13:13:22,766 DEBUG [master/35946657df09:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/35946657df09:0:becomeActiveMaster-HFileCleaner.large.0-1733231602763,5,FailOnTimeoutGroup]
2024-12-03T13:13:22,768 DEBUG [master/35946657df09:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/35946657df09:0:becomeActiveMaster-HFileCleaner.small.0-1733231602767,5,FailOnTimeoutGroup]
2024-12-03T13:13:22,768 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-03T13:13:22,768 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-12-03T13:13:22,768 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-12-03T13:13:22,768 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
2024-12-03T13:13:22,793 DEBUG [RS:0;35946657df09:45683 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-12-03T13:13:22,793 DEBUG [RS:0;35946657df09:45683 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-12-03T13:13:22,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34715 is added to blk_1073741831_1007 (size=1039)
2024-12-03T13:13:22,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741831_1007 (size=1039)
2024-12-03T13:13:22,811 INFO [PEWorker-2 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039
2024-12-03T13:13:22,811 INFO [PEWorker-2 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1
2024-12-03T13:13:22,812 DEBUG [RS:0;35946657df09:45683 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-12-03T13:13:22,812 DEBUG [RS:0;35946657df09:45683 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1be08ec1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-03T13:13:22,819 DEBUG [RS:0;35946657df09:45683 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@673eaad2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=35946657df09/172.17.0.2:0
2024-12-03T13:13:22,819 INFO [RS:0;35946657df09:45683 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled
2024-12-03T13:13:22,819 INFO [RS:0;35946657df09:45683 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled
2024-12-03T13:13:22,819 DEBUG [RS:0;35946657df09:45683 {}] regionserver.HRegionServer(1090): About to register with Master.
2024-12-03T13:13:22,822 INFO [RS:0;35946657df09:45683 {}] regionserver.HRegionServer(3073): reportForDuty to master=35946657df09,41129,1733231600825 with isa=35946657df09/172.17.0.2:45683, startcode=1733231601208
2024-12-03T13:13:22,822 DEBUG [RS:0;35946657df09:45683 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-03T13:13:22,843 INFO [RS-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45811, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService
2024-12-03T13:13:22,844 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41129 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 35946657df09,45683,1733231601208
2024-12-03T13:13:22,844 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41129 {}] master.ServerManager(486): Registering regionserver=35946657df09,45683,1733231601208
2024-12-03T13:13:22,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741832_1008 (size=32)
2024-12-03T13:13:22,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34715 is added to blk_1073741832_1008 (size=32)
2024-12-03T13:13:22,858 DEBUG [RS:0;35946657df09:45683 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1
2024-12-03T13:13:22,858 DEBUG [RS:0;35946657df09:45683 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:41031
2024-12-03T13:13:22,858 DEBUG [RS:0;35946657df09:45683 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1
2024-12-03T13:13:22,859 DEBUG [PEWorker-2 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T13:13:22,867 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-03T13:13:22,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-03T13:13:22,868 DEBUG [RS:0;35946657df09:45683 {}] zookeeper.ZKUtil(111): regionserver:45683-0x1019c9d06900001, quorum=127.0.0.1:54151, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/35946657df09,45683,1733231601208
2024-12-03T13:13:22,868 WARN [RS:0;35946657df09:45683 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-03T13:13:22,869 INFO [RS:0;35946657df09:45683 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-03T13:13:22,869 DEBUG [RS:0;35946657df09:45683 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/WALs/35946657df09,45683,1733231601208
2024-12-03T13:13:22,870 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [35946657df09,45683,1733231601208]
2024-12-03T13:13:22,872 DEBUG [RS:0;35946657df09:45683 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds
2024-12-03T13:13:22,872 INFO [RS:0;35946657df09:45683 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-12-03T13:13:22,874 INFO [RS:0;35946657df09:45683 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-12-03T13:13:22,875 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-03T13:13:22,875 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:13:22,876 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-03T13:13:22,876 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-03T13:13:22,879 INFO [RS:0;35946657df09:45683 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-03T13:13:22,879 INFO [RS:0;35946657df09:45683 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-03T13:13:22,880 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-03T13:13:22,880 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:13:22,880 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-03T13:13:22,880 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-03T13:13:22,881 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-03T13:13:22,881 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T13:13:22,882 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-03T13:13:22,882 DEBUG [PEWorker-2 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/data/hbase/meta/1588230740
2024-12-03T13:13:22,882 INFO [RS:0;35946657df09:45683 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S
2024-12-03T13:13:22,883 DEBUG [PEWorker-2 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/data/hbase/meta/1588230740
2024-12-03T13:13:22,883 INFO [RS:0;35946657df09:45683 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-12-03T13:13:22,883 DEBUG [RS:0;35946657df09:45683 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:13:22,883 DEBUG [RS:0;35946657df09:45683 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:13:22,883 DEBUG [RS:0;35946657df09:45683 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:13:22,883 DEBUG [RS:0;35946657df09:45683 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:13:22,883 DEBUG [RS:0;35946657df09:45683 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:13:22,883 DEBUG [RS:0;35946657df09:45683 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/35946657df09:0, corePoolSize=2, maxPoolSize=2
2024-12-03T13:13:22,883 DEBUG [RS:0;35946657df09:45683 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:13:22,883 DEBUG [RS:0;35946657df09:45683 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:13:22,883 DEBUG [RS:0;35946657df09:45683 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:13:22,883 DEBUG [RS:0;35946657df09:45683 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:13:22,883 DEBUG [RS:0;35946657df09:45683 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/35946657df09:0, corePoolSize=1, maxPoolSize=1
2024-12-03T13:13:22,883 DEBUG [RS:0;35946657df09:45683 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/35946657df09:0, corePoolSize=3, maxPoolSize=3
2024-12-03T13:13:22,883 DEBUG [RS:0;35946657df09:45683 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/35946657df09:0, corePoolSize=3, maxPoolSize=3
2024-12-03T13:13:22,884 DEBUG [PEWorker-2 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-03T13:13:22,885 DEBUG [PEWorker-2 {}] regionserver.HRegion(1085): writing seq id for 1588230740
2024-12-03T13:13:22,889 DEBUG [PEWorker-2 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-03T13:13:22,889 INFO [PEWorker-2 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=763780, jitterRate=-0.02880394458770752}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-03T13:13:22,890 DEBUG [PEWorker-2 {}] regionserver.HRegion(1001): Region open journal for 1588230740:
2024-12-03T13:13:22,890 DEBUG [PEWorker-2 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-03T13:13:22,890 INFO [PEWorker-2 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-03T13:13:22,890 DEBUG [PEWorker-2 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-03T13:13:22,890 DEBUG [PEWorker-2 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-03T13:13:22,890 DEBUG [PEWorker-2 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-03T13:13:22,890 INFO [RS:0;35946657df09:45683 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-12-03T13:13:22,890 INFO [RS:0;35946657df09:45683 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-12-03T13:13:22,891 INFO [RS:0;35946657df09:45683 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-12-03T13:13:22,891 INFO [RS:0;35946657df09:45683 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-12-03T13:13:22,891 INFO [RS:0;35946657df09:45683 {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,45683,1733231601208-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-03T13:13:22,891 INFO [PEWorker-2 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-03T13:13:22,891 DEBUG [PEWorker-2 {}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-03T13:13:22,892 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta
2024-12-03T13:13:22,892 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(107): Going to assign meta
2024-12-03T13:13:22,892 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-12-03T13:13:22,894 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-03T13:13:22,895 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-12-03T13:13:22,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:13:22,906 INFO [RS:0;35946657df09:45683 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-12-03T13:13:22,906 INFO [RS:0;35946657df09:45683 {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,45683,1733231601208-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-03T13:13:22,920 INFO [RS:0;35946657df09:45683 {}] regionserver.Replication(204): 35946657df09,45683,1733231601208 started
2024-12-03T13:13:22,920 INFO [RS:0;35946657df09:45683 {}] regionserver.HRegionServer(1767): Serving as 35946657df09,45683,1733231601208, RpcServer on 35946657df09/172.17.0.2:45683, sessionid=0x1019c9d06900001
2024-12-03T13:13:22,920 DEBUG [RS:0;35946657df09:45683 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-12-03T13:13:22,920 DEBUG [RS:0;35946657df09:45683 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 35946657df09,45683,1733231601208
2024-12-03T13:13:22,920 DEBUG [RS:0;35946657df09:45683 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '35946657df09,45683,1733231601208'
2024-12-03T13:13:22,920 DEBUG [RS:0;35946657df09:45683 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-12-03T13:13:22,925 DEBUG [RS:0;35946657df09:45683 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-12-03T13:13:22,927 DEBUG [RS:0;35946657df09:45683 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-12-03T13:13:22,928 DEBUG [RS:0;35946657df09:45683 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-12-03T13:13:22,928 DEBUG [RS:0;35946657df09:45683 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 35946657df09,45683,1733231601208
2024-12-03T13:13:22,928 DEBUG [RS:0;35946657df09:45683 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '35946657df09,45683,1733231601208'
2024-12-03T13:13:22,928 DEBUG [RS:0;35946657df09:45683 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-12-03T13:13:22,929 DEBUG [RS:0;35946657df09:45683 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-03T13:13:22,929 DEBUG [RS:0;35946657df09:45683 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-03T13:13:22,929 INFO [RS:0;35946657df09:45683 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-03T13:13:22,929 INFO [RS:0;35946657df09:45683 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-12-03T13:13:23,031 INFO [RS:0;35946657df09:45683 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=35946657df09%2C45683%2C1733231601208, suffix=, logDir=hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/WALs/35946657df09,45683,1733231601208, archiveDir=hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/oldWALs, maxLogs=32
2024-12-03T13:13:23,032 INFO [RS:0;35946657df09:45683 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C45683%2C1733231601208.1733231603032
2024-12-03T13:13:23,046 WARN [35946657df09:41129 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions.
2024-12-03T13:13:23,099 INFO [RS:0;35946657df09:45683 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/WALs/35946657df09,45683,1733231601208/35946657df09%2C45683%2C1733231601208.1733231603032
2024-12-03T13:13:23,099 DEBUG [RS:0;35946657df09:45683 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46561:46561),(127.0.0.1/127.0.0.1:41647:41647)]
2024-12-03T13:13:23,157 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-03T13:13:23,160 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:23,161 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:23,161 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:23,161 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:23,161 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:23,161 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:23,162 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:23,162 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:23,187 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:23,187 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:23,187 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:23,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:23,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:23,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:23,194 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:23,195 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:23,195 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:23,199 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T13:13:23,206 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling
2024-12-03T13:13:23,296 DEBUG [35946657df09:41129 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-12-03T13:13:23,296 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=35946657df09,45683,1733231601208
2024-12-03T13:13:23,297 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 35946657df09,45683,1733231601208, state=OPENING
2024-12-03T13:13:23,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:13:23,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-03T13:13:23,338 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-12-03T13:13:23,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45683-0x1019c9d06900001, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:13:23,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:13:23,347 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-03T13:13:23,347 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=35946657df09,45683,1733231601208}]
2024-12-03T13:13:23,347 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-03T13:13:23,499 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 35946657df09,45683,1733231601208
2024-12-03T13:13:23,499 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-03T13:13:23,501 INFO [RS-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52674, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-03T13:13:23,504 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740
2024-12-03T13:13:23,504 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-03T13:13:23,505 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=35946657df09%2C45683%2C1733231601208.meta, suffix=.meta, logDir=hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/WALs/35946657df09,45683,1733231601208, archiveDir=hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/oldWALs, maxLogs=32
2024-12-03T13:13:23,506 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 35946657df09%2C45683%2C1733231601208.meta.1733231603506.meta
2024-12-03T13:13:23,527 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/WALs/35946657df09,45683,1733231601208/35946657df09%2C45683%2C1733231601208.meta.1733231603506.meta
2024-12-03T13:13:23,527 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41647:41647),(127.0.0.1/127.0.0.1:46561:46561)]
2024-12-03T13:13:23,528 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '',
ENDKEY => ''} 2024-12-03T13:13:23,528 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-03T13:13:23,528 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-03T13:13:23,528 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-03T13:13:23,528 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-03T13:13:23,528 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T13:13:23,528 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-03T13:13:23,528 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-03T13:13:23,532 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T13:13:23,533 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T13:13:23,533 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T13:13:23,533 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T13:13:23,533 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T13:13:23,534 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T13:13:23,534 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T13:13:23,535 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T13:13:23,535 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T13:13:23,535 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T13:13:23,535 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T13:13:23,536 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T13:13:23,537 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/data/hbase/meta/1588230740 2024-12-03T13:13:23,537 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/data/hbase/meta/1588230740 2024-12-03T13:13:23,539 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in 
table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T13:13:23,540 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-03T13:13:23,541 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=797967, jitterRate=0.014668554067611694}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T13:13:23,541 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-03T13:13:23,542 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733231603499 2024-12-03T13:13:23,543 DEBUG [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-03T13:13:23,543 INFO [RS_OPEN_META-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-03T13:13:23,544 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=35946657df09,45683,1733231601208 2024-12-03T13:13:23,544 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 35946657df09,45683,1733231601208, state=OPEN 2024-12-03T13:13:23,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45683-0x1019c9d06900001, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T13:13:23,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T13:13:23,575 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T13:13:23,578 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-03T13:13:23,578 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=35946657df09,45683,1733231601208 in 228 msec 2024-12-03T13:13:23,578 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T13:13:23,580 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-03T13:13:23,580 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 686 msec 2024-12-03T13:13:23,583 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 874 msec 
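The FlushLargeStoresPolicy entry above reports that hbase.hregion.percolumnfamilyflush.size.lower.bound is unset for hbase:meta, so the lower bound falls back to memStoreFlushSize divided by the number of column families (16.0 M here). As an aside, the bound can be pinned per table through the descriptor; a minimal sketch using the public TableDescriptorBuilder API, with a hypothetical table name and the 16 MB fallback as the value:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

final class FlushBoundExample {
  // Sketch: pin the per-column-family flush lower bound on a hypothetical
  // table instead of relying on the memStoreFlushSize / #families fallback
  // reported in the log entry above.
  static TableDescriptor withFlushLowerBound() {
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("default", "ExampleTable"))
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
            String.valueOf(16L * 1024 * 1024)) // 16 MB, the fallback above
        .build();
  }
}
```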
2024-12-03T13:13:23,583 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733231603583, completionTime=-1 2024-12-03T13:13:23,583 INFO [master/35946657df09:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-03T13:13:23,583 DEBUG [master/35946657df09:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-03T13:13:23,591 DEBUG [hconnection-0x59774913-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T13:13:23,592 INFO [RS-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52682, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T13:13:23,593 INFO [master/35946657df09:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-03T13:13:23,593 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733231663593 2024-12-03T13:13:23,593 INFO [master/35946657df09:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733231723593 2024-12-03T13:13:23,593 INFO [master/35946657df09:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 10 msec 2024-12-03T13:13:23,640 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,41129,1733231600825-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T13:13:23,640 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,41129,1733231600825-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T13:13:23,640 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,41129,1733231600825-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T13:13:23,640 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-35946657df09:41129, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T13:13:23,640 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-03T13:13:23,640 INFO [master/35946657df09:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
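The ChoreService entries above register the master's periodic chores (ClusterStatusChore every 60000 ms, BalancerChore and RegionNormalizerChore every 300000 ms, and so on). For reference, a custom chore is a subclass of org.apache.hadoop.hbase.ScheduledChore, the class these entries name; the sketch below assumes the (name, stopper, period) constructor and is illustrative only:

```java
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

// Hypothetical chore mirroring the "Chore ... is enabled" entries above:
// a name, a Stoppable owner, and a period in milliseconds.
final class HeartbeatChore extends ScheduledChore {
  HeartbeatChore(Stoppable stopper) {
    super("HeartbeatChore", stopper, 60_000); // 60 s, like ClusterStatusChore
  }

  @Override
  protected void chore() {
    // periodic work; invoked every period until the stopper stops it
  }
}
```

Scheduling it via something like new ChoreService("example").scheduleChore(new HeartbeatChore(stopper)) yields the "Chore ScheduledChore name=... is enabled." lines that ChoreService(168) logs here.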
2024-12-03T13:13:23,640 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-03T13:13:23,642 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-03T13:13:23,644 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T13:13:23,644 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T13:13:23,644 DEBUG [master/35946657df09:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-03T13:13:23,645 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T13:13:23,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741835_1011 (size=358) 2024-12-03T13:13:23,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34715 is added to blk_1073741835_1011 (size=358) 2024-12-03T13:13:23,688 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => c437a0e647d1f1f9f29c355e6a1ae1a0, NAME => 'hbase:namespace,,1733231603640.c437a0e647d1f1f9f29c355e6a1ae1a0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1 2024-12-03T13:13:23,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34715 is added to blk_1073741836_1012 (size=42) 2024-12-03T13:13:23,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741836_1012 (size=42) 2024-12-03T13:13:23,734 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733231603640.c437a0e647d1f1f9f29c355e6a1ae1a0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T13:13:23,734 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing c437a0e647d1f1f9f29c355e6a1ae1a0, disabling compactions & flushes 2024-12-03T13:13:23,734 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region 
hbase:namespace,,1733231603640.c437a0e647d1f1f9f29c355e6a1ae1a0. 2024-12-03T13:13:23,734 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733231603640.c437a0e647d1f1f9f29c355e6a1ae1a0. 2024-12-03T13:13:23,734 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733231603640.c437a0e647d1f1f9f29c355e6a1ae1a0. after waiting 0 ms 2024-12-03T13:13:23,734 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733231603640.c437a0e647d1f1f9f29c355e6a1ae1a0. 2024-12-03T13:13:23,734 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733231603640.c437a0e647d1f1f9f29c355e6a1ae1a0. 2024-12-03T13:13:23,735 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for c437a0e647d1f1f9f29c355e6a1ae1a0: 2024-12-03T13:13:23,736 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T13:13:23,736 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733231603640.c437a0e647d1f1f9f29c355e6a1ae1a0.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733231603736"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733231603736"}]},"ts":"1733231603736"} 2024-12-03T13:13:23,739 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-03T13:13:23,740 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T13:13:23,740 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733231603740"}]},"ts":"1733231603740"} 2024-12-03T13:13:23,742 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-03T13:13:23,770 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=c437a0e647d1f1f9f29c355e6a1ae1a0, ASSIGN}] 2024-12-03T13:13:23,773 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=c437a0e647d1f1f9f29c355e6a1ae1a0, ASSIGN 2024-12-03T13:13:23,773 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=c437a0e647d1f1f9f29c355e6a1ae1a0, ASSIGN; state=OFFLINE, location=35946657df09,45683,1733231601208; forceNewPlan=false, retain=false 2024-12-03T13:13:23,896 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 
java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T13:13:23,924 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=c437a0e647d1f1f9f29c355e6a1ae1a0, regionState=OPENING, regionLocation=35946657df09,45683,1733231601208 2024-12-03T13:13:23,926 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure c437a0e647d1f1f9f29c355e6a1ae1a0, server=35946657df09,45683,1733231601208}] 2024-12-03T13:13:24,079 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 35946657df09,45683,1733231601208 2024-12-03T13:13:24,087 INFO [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733231603640.c437a0e647d1f1f9f29c355e6a1ae1a0. 
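The recurring "Failed invocation ... Filesystem closed" warnings in this stretch come from RecoverLeaseFSUtils reflectively calling isFileClosed() while recovering the lease on an old WAL; the call fails here only because the test's DFSClient has already been shut down. The recover-then-poll pattern it implements can be sketched directly against the DistributedFileSystem API (both methods appear in the stack traces); walPath, the dfs handle, and the 1 s back-off are illustrative:

```java
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

final class LeaseRecoveryExample {
  // Sketch of the pattern behind RecoverLeaseFSUtils.recoverFileLease():
  // recoverLease() asks the NameNode to release the previous writer's lease,
  // and isFileClosed() (the reflective call failing above) confirms closure.
  static void recoverWalLease(DistributedFileSystem dfs, Path walPath)
      throws IOException, InterruptedException {
    boolean recovered = dfs.recoverLease(walPath);
    while (!recovered && !dfs.isFileClosed(walPath)) {
      Thread.sleep(1_000); // illustrative back-off; HBase uses configurable pauses
      recovered = dfs.recoverLease(walPath);
    }
  }
}
```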
2024-12-03T13:13:24,087 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => c437a0e647d1f1f9f29c355e6a1ae1a0, NAME => 'hbase:namespace,,1733231603640.c437a0e647d1f1f9f29c355e6a1ae1a0.', STARTKEY => '', ENDKEY => ''} 2024-12-03T13:13:24,087 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace c437a0e647d1f1f9f29c355e6a1ae1a0 2024-12-03T13:13:24,087 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733231603640.c437a0e647d1f1f9f29c355e6a1ae1a0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T13:13:24,087 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for c437a0e647d1f1f9f29c355e6a1ae1a0 2024-12-03T13:13:24,087 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for c437a0e647d1f1f9f29c355e6a1ae1a0 2024-12-03T13:13:24,115 INFO [StoreOpener-c437a0e647d1f1f9f29c355e6a1ae1a0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region c437a0e647d1f1f9f29c355e6a1ae1a0 2024-12-03T13:13:24,120 INFO [StoreOpener-c437a0e647d1f1f9f29c355e6a1ae1a0-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c437a0e647d1f1f9f29c355e6a1ae1a0 columnFamilyName info 2024-12-03T13:13:24,121 DEBUG [StoreOpener-c437a0e647d1f1f9f29c355e6a1ae1a0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T13:13:24,123 INFO [StoreOpener-c437a0e647d1f1f9f29c355e6a1ae1a0-1 {}] regionserver.HStore(327): Store=c437a0e647d1f1f9f29c355e6a1ae1a0/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T13:13:24,125 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/data/hbase/namespace/c437a0e647d1f1f9f29c355e6a1ae1a0 2024-12-03T13:13:24,126 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] 
regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/data/hbase/namespace/c437a0e647d1f1f9f29c355e6a1ae1a0 2024-12-03T13:13:24,128 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for c437a0e647d1f1f9f29c355e6a1ae1a0 2024-12-03T13:13:24,135 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/data/hbase/namespace/c437a0e647d1f1f9f29c355e6a1ae1a0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T13:13:24,136 INFO [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened c437a0e647d1f1f9f29c355e6a1ae1a0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=696036, jitterRate=-0.11494456231594086}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T13:13:24,136 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for c437a0e647d1f1f9f29c355e6a1ae1a0: 2024-12-03T13:13:24,137 INFO [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733231603640.c437a0e647d1f1f9f29c355e6a1ae1a0., pid=6, masterSystemTime=1733231604079 2024-12-03T13:13:24,139 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733231603640.c437a0e647d1f1f9f29c355e6a1ae1a0. 2024-12-03T13:13:24,139 INFO [RS_OPEN_PRIORITY_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733231603640.c437a0e647d1f1f9f29c355e6a1ae1a0. 
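The "Opened c437a0e647d1f1f9f29c355e6a1ae1a0; ... SteppingSplitPolicy..." entry above shows each region reporting its split policy together with the computed desiredMaxFileSize and jitterRate. The policy class is selected by the hbase.regionserver.region.split.policy setting (it can also be set per table in the descriptor); a minimal configuration sketch naming the same SteppingSplitPolicy class the log reports:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

final class SplitPolicyExample {
  // Sketch: select the split policy the regions above report, cluster-wide.
  static Configuration withSteppingSplitPolicy() {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.regionserver.region.split.policy",
        "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy");
    return conf;
  }
}
```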
2024-12-03T13:13:24,143 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=c437a0e647d1f1f9f29c355e6a1ae1a0, regionState=OPEN, openSeqNum=2, regionLocation=35946657df09,45683,1733231601208 2024-12-03T13:13:24,148 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-03T13:13:24,148 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure c437a0e647d1f1f9f29c355e6a1ae1a0, server=35946657df09,45683,1733231601208 in 219 msec 2024-12-03T13:13:24,150 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-03T13:13:24,150 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=c437a0e647d1f1f9f29c355e6a1ae1a0, ASSIGN in 378 msec 2024-12-03T13:13:24,151 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T13:13:24,151 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733231604151"}]},"ts":"1733231604151"} 2024-12-03T13:13:24,152 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-03T13:13:24,189 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T13:13:24,191 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 549 msec 2024-12-03T13:13:24,244 DEBUG [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-03T13:13:24,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45683-0x1019c9d06900001, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T13:13:24,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-03T13:13:24,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T13:13:24,257 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-03T13:13:24,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-03T13:13:24,299 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 41 msec 2024-12-03T13:13:24,310 DEBUG [master/35946657df09:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-03T13:13:24,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409 [java.lang.reflect.InvocationTargetException caused by java.io.IOException: Filesystem closed; stack trace identical to the 13:13:23,309 occurrence above] 2024-12-03T13:13:24,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta [java.lang.reflect.InvocationTargetException caused by java.io.IOException: Filesystem closed; stack trace identical to the 13:13:23,322 occurrence above] 2024-12-03T13:13:24,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-03T13:13:24,349 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 38 msec 2024-12-03T13:13:24,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-03T13:13:24,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-03T13:13:24,393 INFO [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 2.738sec 2024-12-03T13:13:24,393 INFO [master/35946657df09:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-03T13:13:24,393 INFO [master/35946657df09:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-03T13:13:24,393 INFO [master/35946657df09:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-03T13:13:24,393 INFO [master/35946657df09:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-03T13:13:24,394 INFO [master/35946657df09:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-03T13:13:24,394 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,41129,1733231600825-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T13:13:24,394 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,41129,1733231600825-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
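With master initialization complete, the next entries show the test client connecting: a ReadOnlyZKClient session to 127.0.0.1:54151 followed by RPC connections to the region server. The standard client-side equivalent is sketched below; the quorum host and client port are taken from this log, everything else is boilerplate ConnectionFactory usage:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

final class ClientConnectExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "54151"); // ZK port from this run
    // createConnection() performs the ZooKeeper lookup logged in the next entries
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      System.out.println("connected: " + !connection.isClosed());
    }
  }
}
```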
2024-12-03T13:13:24,395 DEBUG [master/35946657df09:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-03T13:13:24,395 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-03T13:13:24,395 INFO [master/35946657df09:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=35946657df09,41129,1733231600825-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T13:13:24,418 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x711f72be to 127.0.0.1:54151 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@63904f43 2024-12-03T13:13:24,430 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@308a6248, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T13:13:24,443 DEBUG [hconnection-0x56a3de79-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T13:13:24,444 INFO [RS-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52692, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T13:13:24,451 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=35946657df09,41129,1733231600825 2024-12-03T13:13:24,451 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T13:13:24,456 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-03T13:13:24,456 INFO [Time-limited test {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T13:13:24,458 INFO [Time-limited test {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/WALs/test.com,8080,1, archiveDir=hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/oldWALs, maxLogs=32 2024-12-03T13:13:24,458 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733231604458 2024-12-03T13:13:24,485 INFO [Time-limited test {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/WALs/test.com,8080,1/test.com%2C8080%2C1.1733231604458 2024-12-03T13:13:24,485 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41647:41647),(127.0.0.1/127.0.0.1:46561:46561)] 2024-12-03T13:13:24,485 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733231604485 2024-12-03T13:13:24,499 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/WALs/test.com,8080,1/test.com%2C8080%2C1.1733231604458 with entries=0, filesize=85 B; new WAL 
/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/WALs/test.com,8080,1/test.com%2C8080%2C1.1733231604485 2024-12-03T13:13:24,499 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46561:46561),(127.0.0.1/127.0.0.1:41647:41647)] 2024-12-03T13:13:24,499 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/WALs/test.com,8080,1/test.com%2C8080%2C1.1733231604458 is not closed yet, will try archiving it next time 2024-12-03T13:13:24,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741837_1013 (size=93) 2024-12-03T13:13:24,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34715 is added to blk_1073741837_1013 (size=93) 2024-12-03T13:13:24,507 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/WALs/test.com,8080,1 2024-12-03T13:13:24,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741838_1014 (size=93) 2024-12-03T13:13:24,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34715 is added to blk_1073741838_1014 (size=93) 2024-12-03T13:13:24,896 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/MasterData/WALs/35946657df09,40851,1733231279554/35946657df09%2C40851%2C1733231279554.1733231279886 [java.lang.reflect.InvocationTargetException caused by java.io.IOException: Filesystem closed; stack trace identical to the 13:13:23,896 occurrence above] 2024-12-03T13:13:24,903 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/WALs/test.com,8080,1/test.com%2C8080%2C1.1733231604458 to hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/oldWALs/test.com%2C8080%2C1.1733231604458 2024-12-03T13:13:24,905 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/oldWALs 2024-12-03T13:13:24,905 INFO [Time-limited test {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1733231604485) 2024-12-03T13:13:24,905 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-03T13:13:24,905 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x711f72be to 127.0.0.1:54151 2024-12-03T13:13:24,905 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T13:13:24,905 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-03T13:13:24,905 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2091432087, stopped=false 2024-12-03T13:13:24,905 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=35946657df09,41129,1733231600825 2024-12-03T13:13:24,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45683-0x1019c9d06900001, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T13:13:24,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T13:13:24,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45683-0x1019c9d06900001, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T13:13:24,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T13:13:24,947 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-03T13:13:24,947 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45683-0x1019c9d06900001, quorum=127.0.0.1:54151, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T13:13:24,947 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T13:13:24,947 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '35946657df09,45683,1733231601208' ***** 2024-12-03T13:13:24,947 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T13:13:24,947 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-03T13:13:24,949 INFO [RS:0;35946657df09:45683 {}] regionserver.HeapMemoryManager(220):
Stopping 2024-12-03T13:13:24,949 INFO [RS:0;35946657df09:45683 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T13:13:24,949 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-03T13:13:24,949 INFO [RS:0;35946657df09:45683 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T13:13:24,949 INFO [RS:0;35946657df09:45683 {}] regionserver.HRegionServer(3579): Received CLOSE for c437a0e647d1f1f9f29c355e6a1ae1a0 2024-12-03T13:13:24,949 INFO [RS:0;35946657df09:45683 {}] regionserver.HRegionServer(1224): stopping server 35946657df09,45683,1733231601208 2024-12-03T13:13:24,949 DEBUG [RS:0;35946657df09:45683 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T13:13:24,949 INFO [RS:0;35946657df09:45683 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T13:13:24,949 INFO [RS:0;35946657df09:45683 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T13:13:24,949 INFO [RS:0;35946657df09:45683 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-03T13:13:24,949 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing c437a0e647d1f1f9f29c355e6a1ae1a0, disabling compactions & flushes 2024-12-03T13:13:24,949 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733231603640.c437a0e647d1f1f9f29c355e6a1ae1a0. 2024-12-03T13:13:24,949 INFO [RS:0;35946657df09:45683 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-03T13:13:24,949 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733231603640.c437a0e647d1f1f9f29c355e6a1ae1a0. 2024-12-03T13:13:24,949 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733231603640.c437a0e647d1f1f9f29c355e6a1ae1a0. after waiting 0 ms 2024-12-03T13:13:24,949 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733231603640.c437a0e647d1f1f9f29c355e6a1ae1a0. 
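The WARN above deserves a note: RecoverLeaseFSUtils reaches DistributedFileSystem#isFileClosed through reflection, so when the test filesystem has already been shut down, the real failure (java.io.IOException: Filesystem closed, thrown by DFSClient.checkOpen) surfaces as the cause of an InvocationTargetException whose own message is null, exactly as printed. A minimal sketch of that probe pattern, assuming only hadoop-common on the classpath; the class and helper below are illustrative, not the HBase source:

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class IsFileClosedProbe {
      // True only if the probe ran and the NameNode reported the file closed.
      static boolean isFileClosed(FileSystem fs, Path p) {
        try {
          // Looked up reflectively because older Hadoop clients lack the method.
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, p);
        } catch (InvocationTargetException e) {
          // The underlying IOException (e.g. "Filesystem closed") is e.getCause();
          // the wrapper's own message is null, matching the WARN lines above.
          return false;
        } catch (ReflectiveOperationException e) {
          return false; // method absent or inaccessible on this FileSystem
        }
      }
    }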
2024-12-03T13:13:24,950 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing c437a0e647d1f1f9f29c355e6a1ae1a0 1/1 column families, dataSize=78 B heapSize=488 B
2024-12-03T13:13:24,950 INFO [RS:0;35946657df09:45683 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close
2024-12-03T13:13:24,950 DEBUG [RS:0;35946657df09:45683 {}] regionserver.HRegionServer(1603): Online Regions={c437a0e647d1f1f9f29c355e6a1ae1a0=hbase:namespace,,1733231603640.c437a0e647d1f1f9f29c355e6a1ae1a0., 1588230740=hbase:meta,,1.1588230740}
2024-12-03T13:13:24,950 DEBUG [RS:0;35946657df09:45683 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, c437a0e647d1f1f9f29c355e6a1ae1a0
2024-12-03T13:13:24,950 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes
2024-12-03T13:13:24,950 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740
2024-12-03T13:13:24,950 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740
2024-12-03T13:13:24,950 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-03T13:13:24,950 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740
2024-12-03T13:13:24,950 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=1.23 KB heapSize=2.87 KB
2024-12-03T13:13:24,976 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/data/hbase/namespace/c437a0e647d1f1f9f29c355e6a1ae1a0/.tmp/info/82f3752b5fdf4a53b4a465d08d8ea605 is 45, key is default/info:d/1733231604267/Put/seqid=0
2024-12-03T13:13:24,978 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/data/hbase/meta/1588230740/.tmp/info/b6cc5817c18f470e8b9792c62810ae1b is 143, key is hbase:namespace,,1733231603640.c437a0e647d1f1f9f29c355e6a1ae1a0./info:regioninfo/1733231604143/Put/seqid=0
2024-12-03T13:13:24,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34715 is added to blk_1073741839_1015 (size=5037)
2024-12-03T13:13:24,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741839_1015 (size=5037)
2024-12-03T13:13:24,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741840_1016 (size=6595)
2024-12-03T13:13:24,987 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/data/hbase/namespace/c437a0e647d1f1f9f29c355e6a1ae1a0/.tmp/info/82f3752b5fdf4a53b4a465d08d8ea605
2024-12-03T13:13:24,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34715 is added to blk_1073741840_1016 (size=6595)
2024-12-03T13:13:24,996 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.14 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/data/hbase/meta/1588230740/.tmp/info/b6cc5817c18f470e8b9792c62810ae1b
2024-12-03T13:13:25,001 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/data/hbase/namespace/c437a0e647d1f1f9f29c355e6a1ae1a0/.tmp/info/82f3752b5fdf4a53b4a465d08d8ea605 as hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/data/hbase/namespace/c437a0e647d1f1f9f29c355e6a1ae1a0/info/82f3752b5fdf4a53b4a465d08d8ea605
2024-12-03T13:13:25,010 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/data/hbase/namespace/c437a0e647d1f1f9f29c355e6a1ae1a0/info/82f3752b5fdf4a53b4a465d08d8ea605, entries=2, sequenceid=6, filesize=4.9 K
2024-12-03T13:13:25,011 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for c437a0e647d1f1f9f29c355e6a1ae1a0 in 62ms, sequenceid=6, compaction requested=false
2024-12-03T13:13:25,011 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace'
2024-12-03T13:13:25,036 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/data/hbase/meta/1588230740/.tmp/table/75c5bb655fe84ce9bc47ddc8ff62e7c5 is 51, key is hbase:namespace/table:state/1733231604151/Put/seqid=0
2024-12-03T13:13:25,039 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/data/hbase/namespace/c437a0e647d1f1f9f29c355e6a1ae1a0/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-03T13:13:25,040 INFO [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733231603640.c437a0e647d1f1f9f29c355e6a1ae1a0.
2024-12-03T13:13:25,040 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for c437a0e647d1f1f9f29c355e6a1ae1a0:
2024-12-03T13:13:25,040 DEBUG [RS_CLOSE_REGION-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733231603640.c437a0e647d1f1f9f29c355e6a1ae1a0.
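The flush above follows the usual write-then-commit pattern: the new HFile is written under the region's .tmp directory, then moved into the live store directory ("Committing ... as ...") before being registered ("Added ..."). A minimal sketch of the commit step, assuming the Hadoop FileSystem API; the helper name and error handling are illustrative, not HBase's HRegionFileSystem:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class CommitStoreFile {
      // Move a flushed file from <region>/.tmp/<family>/<name> into the store
      // directory. A rename is metadata-only in HDFS, so readers see either the
      // whole committed file or nothing, never a half-written one.
      static Path commit(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
        Path dst = new Path(storeDir, tmpFile.getName());
        if (!fs.rename(tmpFile, dst)) {
          throw new IOException("Failed to commit " + tmpFile + " as " + dst);
        }
        return dst;
      }
    }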
2024-12-03T13:13:25,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34715 is added to blk_1073741841_1017 (size=5242)
2024-12-03T13:13:25,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741841_1017 (size=5242)
2024-12-03T13:13:25,042 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=94 B at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/data/hbase/meta/1588230740/.tmp/table/75c5bb655fe84ce9bc47ddc8ff62e7c5
2024-12-03T13:13:25,049 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/data/hbase/meta/1588230740/.tmp/info/b6cc5817c18f470e8b9792c62810ae1b as hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/data/hbase/meta/1588230740/info/b6cc5817c18f470e8b9792c62810ae1b
2024-12-03T13:13:25,053 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/data/hbase/meta/1588230740/info/b6cc5817c18f470e8b9792c62810ae1b, entries=10, sequenceid=9, filesize=6.4 K
2024-12-03T13:13:25,054 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/data/hbase/meta/1588230740/.tmp/table/75c5bb655fe84ce9bc47ddc8ff62e7c5 as hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/data/hbase/meta/1588230740/table/75c5bb655fe84ce9bc47ddc8ff62e7c5
2024-12-03T13:13:25,059 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/data/hbase/meta/1588230740/table/75c5bb655fe84ce9bc47ddc8ff62e7c5, entries=2, sequenceid=9, filesize=5.1 K
2024-12-03T13:13:25,063 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~1.23 KB/1264, heapSize ~2.59 KB/2648, currentSize=0 B/0 for 1588230740 in 113ms, sequenceid=9, compaction requested=false
2024-12-03T13:13:25,063 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-12-03T13:13:25,083 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/data/hbase/meta/1588230740/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=1
2024-12-03T13:13:25,084 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-03T13:13:25,084 INFO [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-12-03T13:13:25,084 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-03T13:13:25,084 DEBUG [RS_CLOSE_META-regionserver/35946657df09:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-03T13:13:25,150 INFO [RS:0;35946657df09:45683 {}] regionserver.HRegionServer(1250): stopping server 35946657df09,45683,1733231601208; all regions closed.
2024-12-03T13:13:25,150 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/WALs/35946657df09,45683,1733231601208
2024-12-03T13:13:25,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34715 is added to blk_1073741834_1010 (size=2484)
2024-12-03T13:13:25,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741834_1010 (size=2484)
2024-12-03T13:13:25,153 DEBUG [RS:0;35946657df09:45683 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/oldWALs
2024-12-03T13:13:25,153 INFO [RS:0;35946657df09:45683 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 35946657df09%2C45683%2C1733231601208.meta:.meta(num 1733231603506)
2024-12-03T13:13:25,154 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/WALs/35946657df09,45683,1733231601208
2024-12-03T13:13:25,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741833_1009 (size=1414)
2024-12-03T13:13:25,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34715 is added to blk_1073741833_1009 (size=1414)
2024-12-03T13:13:25,157 DEBUG [RS:0;35946657df09:45683 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/oldWALs
2024-12-03T13:13:25,157 INFO [RS:0;35946657df09:45683 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 35946657df09%2C45683%2C1733231601208:(num 1733231603032)
2024-12-03T13:13:25,157 DEBUG [RS:0;35946657df09:45683 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-03T13:13:25,157 INFO [RS:0;35946657df09:45683 {}] regionserver.LeaseManager(133): Closed leases
2024-12-03T13:13:25,157 INFO [RS:0;35946657df09:45683 {}] hbase.ChoreService(370): Chore service for: regionserver/35946657df09:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown
2024-12-03T13:13:25,157 INFO [regionserver/35946657df09:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
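Each WAL shutdown above runs the same two steps: close the writer, then rename any finished WAL files into the shared oldWALs directory ("Moved 1 WAL file(s) to .../oldWALs"). A minimal sketch of the archiving half under the assumption that a plain listStatus/rename loop suffices for illustration; the real AbstractFSWAL does considerably more bookkeeping:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class WalArchiveSketch {
      // Move every remaining WAL file for a server into oldWALs and return the
      // count, so the caller can log "Moved N WAL file(s) to .../oldWALs".
      static int archiveAll(FileSystem fs, Path walDir, Path oldWalsDir) throws IOException {
        int moved = 0;
        for (FileStatus st : fs.listStatus(walDir)) {
          if (st.isFile()
              && fs.rename(st.getPath(), new Path(oldWalsDir, st.getPath().getName()))) {
            moved++;
          }
        }
        return moved;
      }
    }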
2024-12-03T13:13:25,157 INFO [RS:0;35946657df09:45683 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:45683
2024-12-03T13:13:25,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45683-0x1019c9d06900001, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/35946657df09,45683,1733231601208
2024-12-03T13:13:25,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-03T13:13:25,171 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [35946657df09,45683,1733231601208]
2024-12-03T13:13:25,171 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 35946657df09,45683,1733231601208; numProcessing=1
2024-12-03T13:13:25,180 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/35946657df09,45683,1733231601208 already deleted, retry=false
2024-12-03T13:13:25,180 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 35946657df09,45683,1733231601208 expired; onlineServers=0
2024-12-03T13:13:25,180 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '35946657df09,41129,1733231600825' *****
2024-12-03T13:13:25,180 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-03T13:13:25,180 DEBUG [M:0;35946657df09:41129 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4433c1f1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=35946657df09/172.17.0.2:0
2024-12-03T13:13:25,180 INFO [M:0;35946657df09:41129 {}] regionserver.HRegionServer(1224): stopping server 35946657df09,41129,1733231600825
2024-12-03T13:13:25,180 INFO [M:0;35946657df09:41129 {}] regionserver.HRegionServer(1250): stopping server 35946657df09,41129,1733231600825; all regions closed.
2024-12-03T13:13:25,180 DEBUG [M:0;35946657df09:41129 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-03T13:13:25,180 DEBUG [M:0;35946657df09:41129 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-03T13:13:25,180 DEBUG [M:0;35946657df09:41129 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-03T13:13:25,180 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
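The RegionServerTracker lines above show the master reacting to the NodeDeleted event for the region server's ephemeral znode under /hbase/rs. A minimal sketch of that watch pattern with the plain ZooKeeper client; the class name and printed message are illustrative, and a real tracker must also re-arm the watch, since ZooKeeper watches fire only once:

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    final class RsExpirationWatch {
      // Arm a one-shot watch on a region server's ephemeral znode; when the
      // server's session ends, ZooKeeper deletes the node and fires NodeDeleted.
      static void watch(ZooKeeper zk, String rsZnode)
          throws KeeperException, InterruptedException {
        zk.exists(rsZnode, event -> {
          if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
            System.out.println("RegionServer ephemeral node deleted, processing expiration "
                + event.getPath());
          }
        });
      }
    }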
2024-12-03T13:13:25,180 DEBUG [master/35946657df09:0:becomeActiveMaster-HFileCleaner.large.0-1733231602763 {}] cleaner.HFileCleaner(306): Exit Thread[master/35946657df09:0:becomeActiveMaster-HFileCleaner.large.0-1733231602763,5,FailOnTimeoutGroup]
2024-12-03T13:13:25,180 INFO [M:0;35946657df09:41129 {}] hbase.ChoreService(370): Chore service for: master/35946657df09:0 had [] on shutdown
2024-12-03T13:13:25,180 DEBUG [M:0;35946657df09:41129 {}] master.HMaster(1733): Stopping service threads
2024-12-03T13:13:25,180 INFO [M:0;35946657df09:41129 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-03T13:13:25,181 INFO [M:0;35946657df09:41129 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-03T13:13:25,181 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-03T13:13:25,181 DEBUG [master/35946657df09:0:becomeActiveMaster-HFileCleaner.small.0-1733231602767 {}] cleaner.HFileCleaner(306): Exit Thread[master/35946657df09:0:becomeActiveMaster-HFileCleaner.small.0-1733231602767,5,FailOnTimeoutGroup]
2024-12-03T13:13:25,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-03T13:13:25,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T13:13:25,188 DEBUG [M:0;35946657df09:41129 {}] zookeeper.ZKUtil(347): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-03T13:13:25,188 WARN [M:0;35946657df09:41129 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-03T13:13:25,188 INFO [M:0;35946657df09:41129 {}] assignment.AssignmentManager(391): Stopping assignment manager
2024-12-03T13:13:25,188 INFO [M:0;35946657df09:41129 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-03T13:13:25,188 DEBUG [M:0;35946657df09:41129 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-03T13:13:25,188 INFO [M:0;35946657df09:41129 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T13:13:25,188 DEBUG [M:0;35946657df09:41129 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T13:13:25,188 DEBUG [M:0;35946657df09:41129 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-03T13:13:25,188 DEBUG [M:0;35946657df09:41129 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T13:13:25,188 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-03T13:13:25,188 INFO [M:0;35946657df09:41129 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=25.32 KB heapSize=32.31 KB
2024-12-03T13:13:25,206 DEBUG [M:0;35946657df09:41129 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b11a5a8b59f54c0fa7857bcd646176db is 82, key is hbase:meta,,1/info:regioninfo/1733231603544/Put/seqid=0
2024-12-03T13:13:25,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34715 is added to blk_1073741842_1018 (size=5672)
2024-12-03T13:13:25,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741842_1018 (size=5672)
2024-12-03T13:13:25,211 INFO [M:0;35946657df09:41129 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b11a5a8b59f54c0fa7857bcd646176db
2024-12-03T13:13:25,227 DEBUG [M:0;35946657df09:41129 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/962f681c4697421b9e456a8a527f5b1f is 696, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733231604190/Put/seqid=0
2024-12-03T13:13:25,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741843_1019 (size=6626)
2024-12-03T13:13:25,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34715 is added to blk_1073741843_1019 (size=6626)
2024-12-03T13:13:25,232 INFO [M:0;35946657df09:41129 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.72 KB at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/962f681c4697421b9e456a8a527f5b1f
2024-12-03T13:13:25,247 DEBUG [M:0;35946657df09:41129 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1db8f1bc950b4de3b7c5b4c2ad12ac1b is 69, key is 35946657df09,45683,1733231601208/rs:state/1733231602844/Put/seqid=0
2024-12-03T13:13:25,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741844_1020 (size=5156)
2024-12-03T13:13:25,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34715 is added to blk_1073741844_1020 (size=5156)
2024-12-03T13:13:25,251 INFO [M:0;35946657df09:41129 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1db8f1bc950b4de3b7c5b4c2ad12ac1b
2024-12-03T13:13:25,266 DEBUG [M:0;35946657df09:41129 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8b8c8ec1db3d4f2b9f3a47d723752dfa is 52, key is load_balancer_on/state:d/1733231604453/Put/seqid=0
2024-12-03T13:13:25,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741845_1021 (size=5056)
2024-12-03T13:13:25,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34715 is added to blk_1073741845_1021 (size=5056)
2024-12-03T13:13:25,271 INFO [M:0;35946657df09:41129 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8b8c8ec1db3d4f2b9f3a47d723752dfa
2024-12-03T13:13:25,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45683-0x1019c9d06900001, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T13:13:25,271 INFO [RS:0;35946657df09:45683 {}] regionserver.HRegionServer(1307): Exiting; stopping=35946657df09,45683,1733231601208; zookeeper connection closed.
2024-12-03T13:13:25,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45683-0x1019c9d06900001, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T13:13:25,272 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6fe1adb9 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6fe1adb9
2024-12-03T13:13:25,272 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-03T13:13:25,275 DEBUG [M:0;35946657df09:41129 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b11a5a8b59f54c0fa7857bcd646176db as hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b11a5a8b59f54c0fa7857bcd646176db
2024-12-03T13:13:25,278 INFO [M:0;35946657df09:41129 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b11a5a8b59f54c0fa7857bcd646176db, entries=8, sequenceid=70, filesize=5.5 K
2024-12-03T13:13:25,279 DEBUG [M:0;35946657df09:41129 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/962f681c4697421b9e456a8a527f5b1f as hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/962f681c4697421b9e456a8a527f5b1f
2024-12-03T13:13:25,283 INFO [M:0;35946657df09:41129 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/962f681c4697421b9e456a8a527f5b1f, entries=8, sequenceid=70, filesize=6.5 K
2024-12-03T13:13:25,284 DEBUG [M:0;35946657df09:41129 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1db8f1bc950b4de3b7c5b4c2ad12ac1b as hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1db8f1bc950b4de3b7c5b4c2ad12ac1b
2024-12-03T13:13:25,288 INFO [M:0;35946657df09:41129 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1db8f1bc950b4de3b7c5b4c2ad12ac1b, entries=1, sequenceid=70, filesize=5.0 K
2024-12-03T13:13:25,288 DEBUG [M:0;35946657df09:41129 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8b8c8ec1db3d4f2b9f3a47d723752dfa as hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8b8c8ec1db3d4f2b9f3a47d723752dfa
2024-12-03T13:13:25,292 INFO [M:0;35946657df09:41129 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41031/user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8b8c8ec1db3d4f2b9f3a47d723752dfa, entries=1, sequenceid=70, filesize=4.9 K
2024-12-03T13:13:25,293 INFO [M:0;35946657df09:41129 {}] regionserver.HRegion(3040): Finished flush of dataSize ~25.32 KB/25929, heapSize ~32.25 KB/33024, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 105ms, sequenceid=70, compaction requested=false
2024-12-03T13:13:25,294 INFO [M:0;35946657df09:41129 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T13:13:25,294 DEBUG [M:0;35946657df09:41129 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-03T13:13:25,294 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/955328c3-2719-4f6f-a30c-60fcdeece4d1/MasterData/WALs/35946657df09,41129,1733231600825
2024-12-03T13:13:25,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34715 is added to blk_1073741830_1006 (size=31030)
2024-12-03T13:13:25,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45117 is added to blk_1073741830_1006 (size=31030)
2024-12-03T13:13:25,297 INFO [M:0;35946657df09:41129 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-12-03T13:13:25,297 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-03T13:13:25,297 INFO [M:0;35946657df09:41129 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:41129
2024-12-03T13:13:25,305 DEBUG [M:0;35946657df09:41129 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/35946657df09,41129,1733231600825 already deleted, retry=false
2024-12-03T13:13:25,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.1733231280409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:13:25,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42201/user/jenkins/test-data/cd8f0fa8-ebd5-cb65-63a1-65d80326252f/WALs/35946657df09,42823,1733231279724/35946657df09%2C42823%2C1733231279724.meta.1733231280958.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor109.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T13:13:25,413 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T13:13:25,413 INFO [M:0;35946657df09:41129 {}] regionserver.HRegionServer(1307): Exiting; stopping=35946657df09,41129,1733231600825; zookeeper connection closed.
2024-12-03T13:13:25,413 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41129-0x1019c9d06900000, quorum=127.0.0.1:54151, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T13:13:25,417 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@31b05750{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T13:13:25,417 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@78f9a22a{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-03T13:13:25,417 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-03T13:13:25,417 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ad706ec{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-03T13:13:25,417 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@38c3eed5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/hadoop.log.dir/,STOPPED}
2024-12-03T13:13:25,418 WARN [BP-2079988190-172.17.0.2-1733231598815 heartbeating to localhost/127.0.0.1:41031 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-03T13:13:25,418 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-03T13:13:25,418 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-03T13:13:25,418 WARN [BP-2079988190-172.17.0.2-1733231598815 heartbeating to localhost/127.0.0.1:41031 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2079988190-172.17.0.2-1733231598815 (Datanode Uuid a4b78c45-4791-4db5-b009-f6e732f86394) service to localhost/127.0.0.1:41031
2024-12-03T13:13:25,419 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/cluster_e0f154dc-1333-2f7b-b4f0-93d72c44468c/dfs/data/data3/current/BP-2079988190-172.17.0.2-1733231598815 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T13:13:25,419 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/cluster_e0f154dc-1333-2f7b-b4f0-93d72c44468c/dfs/data/data4/current/BP-2079988190-172.17.0.2-1733231598815 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T13:13:25,419 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-03T13:13:25,421 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@25b7e8a0{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T13:13:25,422 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@21af4670{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-03T13:13:25,422 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-03T13:13:25,422 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@37b4b146{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-03T13:13:25,422 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f856bbb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/hadoop.log.dir/,STOPPED}
2024-12-03T13:13:25,423 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-03T13:13:25,423 WARN [BP-2079988190-172.17.0.2-1733231598815 heartbeating to localhost/127.0.0.1:41031 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-03T13:13:25,423 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-03T13:13:25,423 WARN [BP-2079988190-172.17.0.2-1733231598815 heartbeating to localhost/127.0.0.1:41031 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2079988190-172.17.0.2-1733231598815 (Datanode Uuid 0abeb825-835c-41f0-b088-518b1034df39) service to localhost/127.0.0.1:41031
2024-12-03T13:13:25,423 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/cluster_e0f154dc-1333-2f7b-b4f0-93d72c44468c/dfs/data/data1/current/BP-2079988190-172.17.0.2-1733231598815 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T13:13:25,424 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/cluster_e0f154dc-1333-2f7b-b4f0-93d72c44468c/dfs/data/data2/current/BP-2079988190-172.17.0.2-1733231598815 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T13:13:25,424 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-03T13:13:25,429 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3e575f71{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-03T13:13:25,430 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6f55d86f{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-03T13:13:25,430 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-03T13:13:25,430 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@63976b67{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-03T13:13:25,430 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@249f3f68{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/484b0b1c-0bb0-67de-4050-38ae8d240fea/hadoop.log.dir/,STOPPED}
2024-12-03T13:13:25,436 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-12-03T13:13:25,449 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
2024-12-03T13:13:25,455 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=146 (was 123) - Thread LEAK? -, OpenFileDescriptor=513 (was 484) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=625 (was 619) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=1372 (was 2013)
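The closing ResourceChecker line compares per-test before/after snapshots (threads, file descriptors, system load) and flags any growth with a "LEAK?" marker, which is why the 146-vs-123 thread count above is called out. A minimal sketch of that bookkeeping for the thread metric only; the class, method names, and output format are illustrative, not HBase's ResourceChecker:

    final class ThreadCountCheck {
      private int threadsBefore;

      // Snapshot the live thread count before the test body runs.
      void beforeTest() {
        threadsBefore = Thread.activeCount();
      }

      // Compare after the test and print a "LEAK?" marker if the count grew.
      void afterTest(String testName) {
        int threadsAfter = Thread.activeCount();
        String leak = threadsAfter > threadsBefore ? " - Thread LEAK? -" : "";
        System.out.printf("after: %s Thread=%d (was %d)%s%n",
            testName, threadsAfter, threadsBefore, leak);
      }
    }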